author     Peter Maydell <peter.maydell@linaro.org>  2014-12-15 16:43:42 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2014-12-15 16:43:42 +0000
commit     dfa9c2a0f4d0a0c8b2c1449ecdbb1297427e1560 (patch)
tree       8700fd36af5cff7e69f6648140b16cc1f8f2d6ae /target-i386
parent     54600752a1dd67844c2cf3c467db562c39499838 (diff)
parent     224d10ff5aea9e74a1792fc21188bc9752c43ee9 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
- Migration and linuxboot fixes for 2.2 regressions
- valgrind/KVM support
- small i386 patches
- PCI SD host controller support
- malloc/free cleanups from Markus (x86/scsi)
- IvyBridge model
- XSAVES support for KVM
- initial patches from record/replay

# gpg: Signature made Mon 15 Dec 2014 16:35:08 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (47 commits)
  sdhci: Support SDHCI devices on PCI
  sdhci: Define SDHCI PCI ids
  sdhci: Add "sysbus" to sdhci QOM types and methods
  sdhci: Remove class "virtual" methods
  sdhci: Set a default frequency clock
  serial: only resample THR interrupt on rising edge of IER.THRI
  serial: update LSR on enabling/disabling FIFOs
  serial: clean up THRE/TEMT handling
  serial: reset thri_pending on IER writes with THRI=0
  linuxboot: fix loading old kernels
  kvm/apic: fix 2.2->2.1 migration
  target-i386: add Ivy Bridge CPU model
  target-i386: add f16c and rdrand to Haswell and Broadwell
  target-i386: add VME to all CPUs
  pc: add 2.3 machine types
  i386: do not cross the pages boundaries in replay mode
  cpus: make icount warp behave well with respect to stop/cont
  timer: introduce new QEMU_CLOCK_VIRTUAL_RT clock
  cpu-exec: invalidate nocache translation if they are interrupted
  icount: introduce cpu_get_icount_raw
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/arch_dump.c   16
-rw-r--r--  target-i386/cpu.c         96
-rw-r--r--  target-i386/cpu.h         11
-rw-r--r--  target-i386/kvm.c         44
-rw-r--r--  target-i386/machine.c     21
-rw-r--r--  target-i386/ops_sse.h     16
-rw-r--r--  target-i386/translate.c   14
7 files changed, 170 insertions, 48 deletions
diff --git a/target-i386/arch_dump.c b/target-i386/arch_dump.c
index 0bbed239f8..eccd8031af 100644
--- a/target-i386/arch_dump.c
+++ b/target-i386/arch_dump.c
@@ -78,9 +78,7 @@ static int x86_64_write_elf64_note(WriteCoreDumpFunction f,
descsz = sizeof(x86_64_elf_prstatus);
note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
- note = g_malloc(note_size);
-
- memset(note, 0, note_size);
+ note = g_malloc0(note_size);
note->n_namesz = cpu_to_le32(name_size);
note->n_descsz = cpu_to_le32(descsz);
note->n_type = cpu_to_le32(NT_PRSTATUS);
@@ -159,9 +157,7 @@ static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env,
descsz = sizeof(x86_elf_prstatus);
note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
- note = g_malloc(note_size);
-
- memset(note, 0, note_size);
+ note = g_malloc0(note_size);
note->n_namesz = cpu_to_le32(name_size);
note->n_descsz = cpu_to_le32(descsz);
note->n_type = cpu_to_le32(NT_PRSTATUS);
@@ -216,9 +212,7 @@ int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
descsz = sizeof(x86_elf_prstatus);
note_size = ((sizeof(Elf32_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
- note = g_malloc(note_size);
-
- memset(note, 0, note_size);
+ note = g_malloc0(note_size);
note->n_namesz = cpu_to_le32(name_size);
note->n_descsz = cpu_to_le32(descsz);
note->n_type = cpu_to_le32(NT_PRSTATUS);
@@ -345,9 +339,7 @@ static inline int cpu_write_qemu_note(WriteCoreDumpFunction f,
}
note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
- note = g_malloc(note_size);
-
- memset(note, 0, note_size);
+ note = g_malloc0(note_size);
if (type == 0) {
note32 = note;
note32->n_namesz = cpu_to_le32(name_size);
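
The arch_dump.c hunks above are part of the malloc/free cleanups listed in the merge summary: each g_malloc() followed by a manual memset() is collapsed into a single g_malloc0(), which returns already-zeroed memory. A minimal standalone sketch of the before/after pattern (the buffer name and size are illustrative, not taken from the patch):

#include <glib.h>
#include <string.h>

int main(void)
{
    size_t note_size = 128;            /* illustrative size only */

    /* Before: allocate, then clear by hand. */
    void *before = g_malloc(note_size);
    memset(before, 0, note_size);

    /* After: g_malloc0() hands back memory that is already zeroed. */
    void *after = g_malloc0(note_size);

    g_free(before);
    g_free(after);
    return 0;
}
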
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index e9df33e5c3..b81ac5cda1 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -274,6 +274,17 @@ static const char *cpuid_apm_edx_feature_name[] = {
NULL, NULL, NULL, NULL,
};
+static const char *cpuid_xsave_feature_name[] = {
+ "xsaveopt", "xsavec", "xgetbv1", "xsaves",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+};
+
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
@@ -391,6 +402,13 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.tcg_features = TCG_APM_FEATURES,
.unmigratable_flags = CPUID_APM_INVTSC,
},
+ [FEAT_XSAVE] = {
+ .feat_names = cpuid_xsave_feature_name,
+ .cpuid_eax = 0xd,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 1,
+ .cpuid_reg = R_EAX,
+ .tcg_features = 0,
+ },
};
typedef struct X86RegisterInfo32 {
@@ -742,9 +760,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 15,
.model = 6,
.stepping = 1,
- /* Missing: CPUID_VME, CPUID_HT */
+ /* Missing: CPUID_HT */
.features[FEAT_1_EDX] =
- PPRO_FEATURES |
+ PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
/* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
@@ -784,7 +802,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
- PPRO_FEATURES |
+ PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3,
@@ -910,7 +928,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 15,
.stepping = 3,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -932,7 +950,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 23,
.stepping = 3,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -955,7 +973,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 26,
.stepping = 3,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -978,7 +996,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 44,
.stepping = 1,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1002,7 +1020,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 42,
.stepping = 1,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1018,10 +1036,44 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_LAHF_LM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Xeon E312xx (Sandy Bridge)",
},
{
+ .name = "IvyBridge",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 58,
+ .stepping = 9,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_ERMS,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
+ },
+ {
.name = "Haswell",
.level = 0xd,
.vendor = CPUID_VENDOR_INTEL,
@@ -1029,7 +1081,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 60,
.stepping = 1,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1040,7 +1092,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
- CPUID_EXT_PCID,
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
@@ -1051,6 +1103,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Core Processor (Haswell)",
},
@@ -1062,7 +1116,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 61,
.stepping = 2,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1073,7 +1127,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
- CPUID_EXT_PCID,
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
@@ -1085,6 +1139,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
CPUID_7_0_EBX_SMAP,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Core Processor (Broadwell)",
},
@@ -1096,7 +1152,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1121,7 +1177,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1149,7 +1205,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1179,7 +1235,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 1,
.stepping = 2,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1202,6 +1258,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
CPUID_EXT3_LAHF_LM,
+ /* no xsaveopt! */
.xlevel = 0x8000001A,
.model_id = "AMD Opteron 62xx class CPU",
},
@@ -1213,7 +1270,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 2,
.stepping = 0,
.features[FEAT_1_EDX] =
- CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
@@ -1236,6 +1293,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
CPUID_EXT3_LAHF_LM,
+ /* no xsaveopt! */
.xlevel = 0x8000001A,
.model_id = "AMD Opteron 63xx class CPU",
},
@@ -1530,7 +1588,7 @@ static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
CPUX86State *env = &cpu->env;
char *value;
- value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
+ value = g_malloc(CPUID_VENDOR_SZ + 1);
x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
env->cpuid_vendor3);
return value;
@@ -2377,7 +2435,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
*ebx = *ecx;
} else if (count == 1) {
- *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
+ *eax = env->features[FEAT_XSAVE];
} else if (count < ARRAY_SIZE(ext_save_areas)) {
const ExtSaveArea *esa = &ext_save_areas[count];
if ((env->features[esa->feature] & esa->bits) == esa->bits &&
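
The new FEAT_XSAVE feature word above is exposed through CPUID leaf 0xD, sub-leaf 1, register EAX (bit 0 xsaveopt, bit 1 xsavec, bit 2 xgetbv1, bit 3 xsaves, in the order of cpuid_xsave_feature_name[]). A hedged host-side sketch of reading the same leaf with GCC's <cpuid.h> helpers; it queries the host CPU, which need not match what the guest sees from env->features[FEAT_XSAVE]:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid_max(0, NULL) < 0xd) {
        printf("CPUID leaf 0xd not available on this host\n");
        return 1;
    }

    /* CPUID.(EAX=0xD, ECX=1): XSAVE extended feature flags in EAX. */
    __cpuid_count(0xd, 1, eax, ebx, ecx, edx);

    printf("xsaveopt: %u\n", (eax >> 0) & 1);
    printf("xsavec:   %u\n", (eax >> 1) & 1);
    printf("xgetbv1:  %u\n", (eax >> 2) & 1);
    printf("xsaves:   %u\n", (eax >> 3) & 1);
    return 0;
}
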
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 015f5b5276..3ecff96325 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -28,6 +28,9 @@
#define TARGET_LONG_BITS 32
#endif
+/* Maximum instruction code size */
+#define TARGET_MAX_INSN_SIZE 16
+
/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
@@ -389,6 +392,7 @@
#define MSR_VM_HSAVE_PA 0xc0010117
#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_XSS 0x00000da0
#define XSTATE_FP (1ULL << 0)
#define XSTATE_SSE (1ULL << 1)
@@ -411,6 +415,7 @@ typedef enum FeatureWord {
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
FEAT_SVM, /* CPUID[8000_000A].EDX */
+ FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEATURE_WORDS,
} FeatureWord;
@@ -571,6 +576,11 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
+#define CPUID_XSAVE_XSAVEOPT (1U << 0)
+#define CPUID_XSAVE_XSAVEC (1U << 1)
+#define CPUID_XSAVE_XGETBV1 (1U << 2)
+#define CPUID_XSAVE_XSAVES (1U << 3)
+
/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)
@@ -1019,6 +1029,7 @@ typedef struct CPUX86State {
uint64_t xstate_bv;
uint64_t xcr0;
+ uint64_t xss;
TPRAccess tpr_access_type;
} CPUX86State;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index ccf36e8719..f92edfe14a 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -80,6 +80,7 @@ static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_mtrr;
+static bool has_msr_xss;
static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;
@@ -95,7 +96,7 @@ static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
int r, size;
size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
- cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
+ cpuid = g_malloc0(size);
cpuid->nent = max;
r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
if (r == 0 && cpuid->nent >= max) {
@@ -277,7 +278,7 @@ static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
return;
}
}
- page = g_malloc(sizeof(HWPoisonPage));
+ page = g_new(HWPoisonPage, 1);
page->ram_addr = ram_addr;
QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
@@ -826,6 +827,10 @@ static int kvm_get_supported_msrs(KVMState *s)
has_msr_bndcfgs = true;
continue;
}
+ if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
+ has_msr_xss = true;
+ continue;
+ }
}
}
@@ -1085,7 +1090,7 @@ static int kvm_put_xsave(X86CPU *cpu)
static int kvm_put_xcrs(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct kvm_xcrs xcrs;
+ struct kvm_xcrs xcrs = {};
if (!kvm_has_xcrs()) {
return 0;
@@ -1152,6 +1157,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
uint32_t index, uint64_t value)
{
entry->index = index;
+ entry->reserved = 0;
entry->data = value;
}
@@ -1170,7 +1176,9 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
- msr_data.info.nmsrs = 1;
+ msr_data.info = (struct kvm_msrs) {
+ .nmsrs = 1,
+ };
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
@@ -1190,7 +1198,11 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
- msr_data.info.nmsrs = 1;
+
+ msr_data.info = (struct kvm_msrs) {
+ .nmsrs = 1,
+ };
+
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
@@ -1224,6 +1236,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (has_msr_bndcfgs) {
kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
}
+ if (has_msr_xss) {
+ kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
@@ -1339,7 +1354,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
}
}
- msr_data.info.nmsrs = n;
+ msr_data.info = (struct kvm_msrs) {
+ .nmsrs = n,
+ };
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
@@ -1570,6 +1587,10 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_bndcfgs) {
msrs[n++].index = MSR_IA32_BNDCFGS;
}
+ if (has_msr_xss) {
+ msrs[n++].index = MSR_IA32_XSS;
+ }
+
if (!env->tsc_valid) {
msrs[n++].index = MSR_IA32_TSC;
@@ -1646,7 +1667,10 @@ static int kvm_get_msrs(X86CPU *cpu)
}
}
- msr_data.info.nmsrs = n;
+ msr_data.info = (struct kvm_msrs) {
+ .nmsrs = n,
+ };
+
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
if (ret < 0) {
return ret;
@@ -1717,6 +1741,9 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_IA32_BNDCFGS:
env->msr_bndcfgs = msrs[i].data;
break;
+ case MSR_IA32_XSS:
+ env->xss = msrs[i].data;
+ break;
default:
if (msrs[i].index >= MSR_MC0_CTL &&
msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
@@ -1872,7 +1899,7 @@ static int kvm_put_apic(X86CPU *cpu)
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
CPUX86State *env = &cpu->env;
- struct kvm_vcpu_events events;
+ struct kvm_vcpu_events events = {};
if (!kvm_has_vcpu_events()) {
return 0;
@@ -2563,7 +2590,6 @@ void kvm_arch_init_irq_routing(KVMState *s)
* irqchip, so we can use irqfds, and on x86 we know
* we can use msi via irqfd and GSI routing.
*/
- kvm_irqfds_allowed = true;
kvm_msi_via_irqfd_allowed = true;
kvm_gsi_routing_allowed = true;
}
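
Several kvm.c hunks above replace a bare "msr_data.info.nmsrs = n;" with a compound-literal assignment and zero-initialize on-stack structs such as kvm_xcrs and kvm_vcpu_events. This is the valgrind/KVM work from the summary: any field or padding left uninitialized would otherwise be handed to the kernel by the ioctl. A small standalone sketch of the idiom, using simplified stand-in structs rather than the real <linux/kvm.h> layouts:

#include <stdio.h>

/* Simplified stand-ins; names mirror the patch, not the kernel ABI. */
struct fake_msrs {
    unsigned int nmsrs;
    unsigned int pad;
};

struct fake_msr_data {
    struct fake_msrs info;
};

int main(void)
{
    struct fake_msr_data msr_data;

    /* Before: only nmsrs was written, so 'pad' reached the ioctl with
     * whatever happened to be on the stack. */
    msr_data.info.nmsrs = 1;

    /* After: assigning a compound literal zero-initializes every
     * member that is not named explicitly. */
    msr_data.info = (struct fake_msrs) {
        .nmsrs = 1,
    };

    printf("pad = %u\n", msr_data.info.pad);   /* deterministically 0 now */
    return 0;
}
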
diff --git a/target-i386/machine.c b/target-i386/machine.c
index 1c13b14352..722d62e471 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -687,6 +687,24 @@ static const VMStateDescription vmstate_avx512 = {
}
};
+static bool xss_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+ .name = "cpu/xss",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xss, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -832,6 +850,9 @@ VMStateDescription vmstate_x86_cpu = {
}, {
.vmsd = &vmstate_avx512,
.needed = avx512_needed,
+ }, {
+ .vmsd = &vmstate_xss,
+ .needed = xss_needed,
} , {
/* empty */
}
diff --git a/target-i386/ops_sse.h b/target-i386/ops_sse.h
index 886e0a8243..0765073792 100644
--- a/target-i386/ops_sse.h
+++ b/target-i386/ops_sse.h
@@ -2228,7 +2228,7 @@ void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
Reg rk = *s;
for (i = 0; i < 16; i++) {
- d->B(i) = rk.B(i) ^ (AES_Td4[st.B(AES_ishifts[i])] & 0xff);
+ d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]);
}
}
@@ -2253,7 +2253,7 @@ void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
Reg rk = *s;
for (i = 0; i < 16; i++) {
- d->B(i) = rk.B(i) ^ (AES_Te4[st.B(AES_shifts[i])] & 0xff);
+ d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]);
}
}
@@ -2264,10 +2264,10 @@ void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
Reg tmp = *s;
for (i = 0 ; i < 4 ; i++) {
- d->L(i) = bswap32(AES_Td0[AES_Te4[tmp.B(4*i+0)] & 0xff] ^
- AES_Td1[AES_Te4[tmp.B(4*i+1)] & 0xff] ^
- AES_Td2[AES_Te4[tmp.B(4*i+2)] & 0xff] ^
- AES_Td3[AES_Te4[tmp.B(4*i+3)] & 0xff]);
+ d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^
+ AES_imc[tmp.B(4*i+1)][1] ^
+ AES_imc[tmp.B(4*i+2)][2] ^
+ AES_imc[tmp.B(4*i+3)][3]);
}
}
@@ -2278,8 +2278,8 @@ void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
Reg tmp = *s;
for (i = 0 ; i < 4 ; i++) {
- d->B(i) = AES_Te4[tmp.B(i + 4)] & 0xff;
- d->B(i + 8) = AES_Te4[tmp.B(i + 12)] & 0xff;
+ d->B(i) = AES_sbox[tmp.B(i + 4)];
+ d->B(i + 8) = AES_sbox[tmp.B(i + 12)];
}
d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl;
d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl;
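
The ops_sse.h hunks swap the combined lookup tables (AES_Te4, AES_Td4, AES_Td0..3) for the plain S-box, inverse S-box and the dedicated AES_imc InvMixColumns table. The substitution preserves behaviour because Te4/Td4 hold the (inverse) S-box byte replicated into all four byte lanes, so masking an entry with 0xff recovers the plain S-box value. A small self-contained check of that identity, using a made-up byte table as a stand-in for the real S-box:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  sbox[256];   /* stand-in values, not the real AES S-box */
    uint32_t Te4[256];    /* S-box byte replicated into all four lanes */

    for (int x = 0; x < 256; x++) {
        sbox[x] = (uint8_t)(x * 7 + 0x63);
        Te4[x]  = 0x01010101u * sbox[x];
        /* The replacement relies on: (Te4[x] & 0xff) == sbox[x]. */
        assert((Te4[x] & 0xff) == sbox[x]);
    }

    printf("Te4[x] & 0xff matches sbox[x] for all 256 entries\n");
    return 0;
}
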
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 6243e36661..fc75da7fc0 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -8034,6 +8034,20 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
gen_eob(dc);
break;
}
+ /* Do not cross the boundary of the pages in icount mode,
+ it can cause an exception. Do it only when boundary is
+ crossed by the first instruction in the block.
+ If current instruction already crossed the bound - it's ok,
+ because an exception hasn't stopped this code.
+ */
+ if (use_icount
+ && ((pc_ptr & TARGET_PAGE_MASK)
+ != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
+ || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
+ gen_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
/* if too long translation, stop generation too */
if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
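
The block added to translate.c ends a translation block early when, under icount, the next instruction could straddle a page boundary, or when translation is already sitting exactly at the start of a page. A standalone sketch of just that predicate, with the page size and instruction-size constants hard-coded for illustration (QEMU derives them from TARGET_PAGE_MASK and the TARGET_MAX_INSN_SIZE definition added in cpu.h above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_ILLUSTRATIVE 4096ULL                 /* 4 KiB pages */
#define PAGE_MASK_ILLUSTRATIVE (~(PAGE_SIZE_ILLUSTRATIVE - 1))
#define MAX_INSN_SIZE          16                      /* TARGET_MAX_INSN_SIZE */

/* True when translation should stop at pc: either a worst-case 16-byte
 * instruction starting at pc could spill onto the next page, or pc sits
 * exactly at the start of a page. */
static bool icount_must_break(uint64_t pc)
{
    return (pc & PAGE_MASK_ILLUSTRATIVE) !=
               ((pc + MAX_INSN_SIZE - 1) & PAGE_MASK_ILLUSTRATIVE)
           || (pc & ~PAGE_MASK_ILLUSTRATIVE) == 0;
}

int main(void)
{
    printf("%d\n", icount_must_break(0x1000));  /* page start     -> 1 */
    printf("%d\n", icount_must_break(0x1ff8));  /* may cross page -> 1 */
    printf("%d\n", icount_must_break(0x1800));  /* mid-page       -> 0 */
    return 0;
}
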