author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-16 15:15:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-16 15:15:17 -0700
commit     168f1a7163b37294a0ef33829e1ed54d41e33c42 (patch)
tree       16fa34f24156c28f0a3060d984e98bf4df878f91 /arch/x86/kernel/fpu
parent     825a3b2605c3aa193e0075d0f9c72e33c17ab16a (diff)
parent     4afd0565552c87f23834db9121dd9cf6955d0b43 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - MSR access API fixes and enhancements (Andy Lutomirski)
   - early exception handling improvements (Andy Lutomirski)
   - user-space FS/GS prctl usage fixes and improvements (Andy Lutomirski)
   - Remove the cpu_has_*() APIs and replace them with equivalents (Borislav Petkov)
   - task switch micro-optimization (Brian Gerst)
   - 32-bit entry code simplification (Denys Vlasenko)
   - enhance PAT handling in emulated CPUs (Toshi Kani)

  ... and lots of other cleanups/fixlets"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  x86/arch_prctl/64: Restore accidentally removed put_cpu() in ARCH_SET_GS
  x86/entry/32: Remove asmlinkage_protect()
  x86/entry/32: Remove GET_THREAD_INFO() from entry code
  x86/entry, sched/x86: Don't save/restore EFLAGS on task switch
  x86/asm/entry/32: Simplify pushes of zeroed pt_regs->REGs
  selftests/x86/ldt_gdt: Test set_thread_area() deletion of an active segment
  x86/tls: Synchronize segment registers in set_thread_area()
  x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase
  x86/arch_prctl/64: Remove FSBASE/GSBASE < 4G optimization
  x86/segments/64: When load_gs_index fails, clear the base
  x86/segments/64: When loadsegment(fs, ...) fails, clear the base
  x86/asm: Make asm/alternative.h safe from assembly
  x86/asm: Stop depending on ptrace.h in alternative.h
  x86/entry: Rename is_{ia32,x32}_task() to in_{ia32,x32}_syscall()
  x86/asm: Make sure verify_cpu() has a good stack
  x86/extable: Add a comment about early exception handlers
  x86/msr: Set the return value to zero when native_rdmsr_safe() fails
  x86/paravirt: Make "unsafe" MSR accesses unsafe even if PARAVIRT=y
  x86/paravirt: Add paravirt_{read,write}_msr()
  x86/msr: Carry on after a non-"safe" MSR access fails
  ...
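The change that touches this directory is the cpu_has_*() removal. A minimal before/after sketch of the conversion pattern applied by the hunks below (this restates the pattern, not any single hunk verbatim; the flag names are the kernel's X86_FEATURE_* constants):

	/* Before: per-feature wrapper macros such as cpu_has_fxsr. */
	if (cpu_has_fxsr)
		fpstate_init_fxstate(&state->fxsave);

	/* After: explicit feature-bit tests. boot_cpu_has() is a plain
	 * test against the boot CPU's capability bitmap; static_cpu_has()
	 * compiles to an alternatives-patched branch for hot paths. */
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);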
Diffstat (limited to 'arch/x86/kernel/fpu')
-rw-r--r--  arch/x86/kernel/fpu/bugs.c   | 16
-rw-r--r--  arch/x86/kernel/fpu/core.c   | 50
-rw-r--r--  arch/x86/kernel/fpu/init.c   | 16
-rw-r--r--  arch/x86/kernel/fpu/regset.c | 25
-rw-r--r--  arch/x86/kernel/fpu/xstate.c | 18
5 files changed, 52 insertions(+), 73 deletions(-)
diff --git a/arch/x86/kernel/fpu/bugs.c b/arch/x86/kernel/fpu/bugs.c
index dd9ca9b60ff3..aad34aafc0e0 100644
--- a/arch/x86/kernel/fpu/bugs.c
+++ b/arch/x86/kernel/fpu/bugs.c
@@ -21,11 +21,15 @@ static double __initdata y = 3145727.0;
* We should really only care about bugs here
* anyway. Not features.
*/
-static void __init check_fpu(void)
+void __init fpu__init_check_bugs(void)
{
u32 cr0_saved;
s32 fdiv_bug;
+ /* kernel_fpu_begin/end() relies on patched alternative instructions. */
+ if (!boot_cpu_has(X86_FEATURE_FPU))
+ return;
+
/* We might have CR0::TS set already, clear it: */
cr0_saved = read_cr0();
write_cr0(cr0_saved & ~X86_CR0_TS);
@@ -59,13 +63,3 @@ static void __init check_fpu(void)
pr_warn("Hmm, FPU with FDIV bug\n");
}
}
-
-void __init fpu__init_check_bugs(void)
-{
- /*
- * kernel_fpu_begin/end() in check_fpu() relies on the patched
- * alternative instructions.
- */
- if (cpu_has_fpu)
- check_fpu();
-}
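The bugs.c hunks fold check_fpu() into its only caller, so the feature guard and the probe now live in one function. A sketch of the resulting shape, with the FDIV probe elided (the full body is in the hunks above):

	void __init fpu__init_check_bugs(void)
	{
		u32 cr0_saved;
		s32 fdiv_bug;

		/* kernel_fpu_begin/end() relies on patched alternative instructions. */
		if (!boot_cpu_has(X86_FEATURE_FPU))
			return;

		/* We might have CR0::TS set already, clear it: */
		cr0_saved = read_cr0();
		write_cr0(cr0_saved & ~X86_CR0_TS);

		/* ... x87 FDIV-bug probe runs here, then CR0 is restored ... */
	}

boot_cpu_has() rather than static_cpu_has() is the natural guard here: a direct bitmap test has no dependency on alternatives patching, which may not have run this early.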
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 8e37cc8a539a..97027545a72d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -217,14 +217,14 @@ static inline void fpstate_init_fstate(struct fregs_state *fp)
void fpstate_init(union fpregs_state *state)
{
- if (!cpu_has_fpu) {
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
fpstate_init_soft(&state->soft);
return;
}
memset(state, 0, xstate_size);
- if (cpu_has_fxsr)
+ if (static_cpu_has(X86_FEATURE_FXSR))
fpstate_init_fxstate(&state->fxsave);
else
fpstate_init_fstate(&state->fsave);
@@ -237,7 +237,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
- if (!src_fpu->fpstate_active || !cpu_has_fpu)
+ if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
return 0;
WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -506,33 +506,6 @@ void fpu__clear(struct fpu *fpu)
* x87 math exception handling:
*/
-static inline unsigned short get_fpu_cwd(struct fpu *fpu)
-{
- if (cpu_has_fxsr) {
- return fpu->state.fxsave.cwd;
- } else {
- return (unsigned short)fpu->state.fsave.cwd;
- }
-}
-
-static inline unsigned short get_fpu_swd(struct fpu *fpu)
-{
- if (cpu_has_fxsr) {
- return fpu->state.fxsave.swd;
- } else {
- return (unsigned short)fpu->state.fsave.swd;
- }
-}
-
-static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
-{
- if (cpu_has_xmm) {
- return fpu->state.fxsave.mxcsr;
- } else {
- return MXCSR_DEFAULT;
- }
-}
-
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
int err;
@@ -547,10 +520,15 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
* so if this combination doesn't produce any single exception,
* then we have a bad program that isn't synchronizing its FPU usage
* and it will suffer the consequences since we won't be able to
- * fully reproduce the context of the exception
+ * fully reproduce the context of the exception.
*/
- cwd = get_fpu_cwd(fpu);
- swd = get_fpu_swd(fpu);
+ if (boot_cpu_has(X86_FEATURE_FXSR)) {
+ cwd = fpu->state.fxsave.cwd;
+ swd = fpu->state.fxsave.swd;
+ } else {
+ cwd = (unsigned short)fpu->state.fsave.cwd;
+ swd = (unsigned short)fpu->state.fsave.swd;
+ }
err = swd & ~cwd;
} else {
@@ -560,7 +538,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
* unmasked exception was caught we must mask the exception mask bits
* at 0x1f80, and then use these to mask the exception bits at 0x3f.
*/
- unsigned short mxcsr = get_fpu_mxcsr(fpu);
+ unsigned short mxcsr = MXCSR_DEFAULT;
+
+ if (boot_cpu_has(X86_FEATURE_XMM))
+ mxcsr = fpu->state.fxsave.mxcsr;
+
err = ~(mxcsr >> 7) & mxcsr;
}
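The x87 path above works because the control word's low six bits mask the status word's low six exception flags, so err = swd & ~cwd isolates raised-and-unmasked exceptions. The SSE path does the same in MXCSR, where the flags sit at bits 0-5 (0x3f) and the masks seven bits higher at bits 7-12 (0x1f80); shifting right by 7 aligns each mask over its flag. A worked sketch with a hypothetical register value:

	/* All SSE exceptions masked except divide-by-zero (ZM, bit 9,
	 * clear), with the divide-by-zero flag (ZE, bit 2) raised: */
	unsigned short mxcsr = (0x1f80 & ~0x0200) | 0x0004;	/* 0x1d84 */
	unsigned short err   = ~(mxcsr >> 7) & mxcsr;

	/* mxcsr >> 7 == 0x3b aligns the mask bits with the flag bits;
	 * inverting keeps only flags whose mask is clear, so
	 * err & 0x3f == 0x04: the unmasked ZE flag. Callers such as
	 * fpu__exception_code() only inspect the low six bits of err. */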
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 54c86fffbf9f..aacfd7a82cec 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -29,22 +29,22 @@ static void fpu__init_cpu_generic(void)
unsigned long cr0;
unsigned long cr4_mask = 0;
- if (cpu_has_fxsr)
+ if (boot_cpu_has(X86_FEATURE_FXSR))
cr4_mask |= X86_CR4_OSFXSR;
- if (cpu_has_xmm)
+ if (boot_cpu_has(X86_FEATURE_XMM))
cr4_mask |= X86_CR4_OSXMMEXCPT;
if (cr4_mask)
cr4_set_bits(cr4_mask);
cr0 = read_cr0();
cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
- if (!cpu_has_fpu)
+ if (!boot_cpu_has(X86_FEATURE_FPU))
cr0 |= X86_CR0_EM;
write_cr0(cr0);
/* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
- if (!cpu_has_fpu)
+ if (!boot_cpu_has(X86_FEATURE_FPU))
fpstate_init_soft(&current->thread.fpu.state.soft);
else
#endif
@@ -89,7 +89,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
}
#ifndef CONFIG_MATH_EMULATION
- if (!cpu_has_fpu) {
+ if (!boot_cpu_has(X86_FEATURE_FPU)) {
pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
for (;;)
asm volatile("hlt");
@@ -106,7 +106,7 @@ static void __init fpu__init_system_mxcsr(void)
{
unsigned int mask = 0;
- if (cpu_has_fxsr) {
+ if (boot_cpu_has(X86_FEATURE_FXSR)) {
/* Static because GCC does not get 16-byte stack alignment right: */
static struct fxregs_state fxregs __initdata;
@@ -212,7 +212,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
* fpu__init_system_xstate().
*/
- if (!cpu_has_fpu) {
+ if (!boot_cpu_has(X86_FEATURE_FPU)) {
/*
* Disable xsave as we do not support it if i387
* emulation is enabled.
@@ -221,7 +221,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
xstate_size = sizeof(struct swregs_state);
} else {
- if (cpu_has_fxsr)
+ if (boot_cpu_has(X86_FEATURE_FXSR))
xstate_size = sizeof(struct fxregs_state);
else
xstate_size = sizeof(struct fregs_state);
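For reference, the control-register bits toggled by fpu__init_cpu_generic() above; meanings restated from the Intel SDM as a comment sketch, not new logic:

	/* CR4.OSFXSR     - OS uses FXSAVE/FXRSTOR; required to enable SSE.
	 * CR4.OSXMMEXCPT - unmasked SIMD FP exceptions raise #XM instead
	 *                  of #UD.
	 * CR0.TS         - "task switched": the next FPU instruction traps
	 *                  with #NM (cleared here along with EM).
	 * CR0.EM         - "emulate coprocessor": x87 instructions trap, so
	 *                  CONFIG_MATH_EMULATION can service them; set only
	 *                  when no FPU is present. */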
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 8bd1c003942a..81422dfb152b 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -21,7 +21,10 @@ int regset_xregset_fpregs_active(struct task_struct *target, const struct user_r
{
struct fpu *target_fpu = &target->thread.fpu;
- return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
+ if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+ return regset->n;
+ else
+ return 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
@@ -30,7 +33,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
{
struct fpu *fpu = &target->thread.fpu;
- if (!cpu_has_fxsr)
+ if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_read(fpu);
@@ -47,7 +50,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
struct fpu *fpu = &target->thread.fpu;
int ret;
- if (!cpu_has_fxsr)
+ if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_write(fpu);
@@ -65,7 +68,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
- if (cpu_has_xsave)
+ if (boot_cpu_has(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
return ret;
@@ -79,7 +82,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
struct xregs_state *xsave;
int ret;
- if (!cpu_has_xsave)
+ if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
fpu__activate_fpstate_read(fpu);
@@ -108,7 +111,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
struct xregs_state *xsave;
int ret;
- if (!cpu_has_xsave)
+ if (!boot_cpu_has(X86_FEATURE_XSAVE))
return -ENODEV;
fpu__activate_fpstate_write(fpu);
@@ -275,10 +278,10 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
fpu__activate_fpstate_read(fpu);
- if (!static_cpu_has(X86_FEATURE_FPU))
+ if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
- if (!cpu_has_fxsr)
+ if (!boot_cpu_has(X86_FEATURE_FXSR))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&fpu->state.fsave, 0,
-1);
@@ -306,10 +309,10 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
fpu__activate_fpstate_write(fpu);
fpstate_sanitize_xstate(fpu);
- if (!static_cpu_has(X86_FEATURE_FPU))
+ if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
- if (!cpu_has_fxsr)
+ if (!boot_cpu_has(X86_FEATURE_FXSR))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu->state.fsave, 0,
-1);
@@ -325,7 +328,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
* update the header bit in the xsave header, indicating the
* presence of FP.
*/
- if (cpu_has_xsave)
+ if (boot_cpu_has(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
return ret;
}
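A subtlety behind the xfeatures updates in the regset hunks: XRSTOR reinitializes any state component whose bit is clear in the xsave header's feature bitmap. Because ptrace writes the register image directly rather than through XSAVE, the handlers must set the bits by hand or the user-supplied contents would be dropped on the next context restore; schematically:

	/* After copying ptrace-supplied FP/SSE contents into fpu->state,
	 * mark those components present so XRSTOR keeps them: */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;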
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index b48ef35b28d4..4ea2a59483c7 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -190,7 +190,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
*/
void fpu__init_cpu_xstate(void)
{
- if (!cpu_has_xsave || !xfeatures_mask)
+ if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask)
return;
cr4_set_bits(X86_CR4_OSXSAVE);
@@ -280,7 +280,7 @@ static void __init setup_xstate_comp(void)
xstate_comp_offsets[0] = 0;
xstate_comp_offsets[1] = offsetof(struct fxregs_state, xmm_space);
- if (!cpu_has_xsaves) {
+ if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
if (xfeature_enabled(i)) {
xstate_comp_offsets[i] = xstate_offsets[i];
@@ -316,13 +316,13 @@ static void __init setup_init_fpu_buf(void)
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
- if (!cpu_has_xsave)
+ if (!boot_cpu_has(X86_FEATURE_XSAVE))
return;
setup_xstate_features();
print_xstate_features();
- if (cpu_has_xsaves) {
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
init_fpstate.xsave.header.xfeatures = xfeatures_mask;
}
@@ -417,7 +417,7 @@ static int xfeature_size(int xfeature_nr)
*/
static int using_compacted_format(void)
{
- return cpu_has_xsaves;
+ return boot_cpu_has(X86_FEATURE_XSAVES);
}
static void __xstate_dump_leaves(void)
@@ -549,7 +549,7 @@ static unsigned int __init calculate_xstate_size(void)
unsigned int eax, ebx, ecx, edx;
unsigned int calculated_xstate_size;
- if (!cpu_has_xsaves) {
+ if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
/*
* - CPUID function 0DH, sub-function 0:
* EBX enumerates the size (in bytes) required by
@@ -630,7 +630,7 @@ void __init fpu__init_system_xstate(void)
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
- if (!cpu_has_xsave) {
+ if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
pr_info("x86/fpu: Legacy x87 FPU detected.\n");
return;
}
@@ -667,7 +667,7 @@ void __init fpu__init_system_xstate(void)
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
xfeatures_mask,
xstate_size,
- cpu_has_xsaves ? "compacted" : "standard");
+ boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
}
/*
@@ -678,7 +678,7 @@ void fpu__resume_cpu(void)
/*
* Restore XCR0 on xsave capable CPUs:
*/
- if (cpu_has_xsave)
+ if (boot_cpu_has(X86_FEATURE_XSAVE))
xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}
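On the size enumeration referenced in the calculate_xstate_size() hunk: CPUID leaf 0xD, sub-leaf 0 returns in EBX the standard-format XSAVE area size for the features currently enabled in XCR0, while sub-leaf 1 EBX reports the compacted-format size used by XSAVES (including supervisor states). A minimal sketch using the kernel's cpuid_count() helper, assuming XSAVE has already been enabled in XCR0:

	unsigned int eax, ebx, ecx, edx;
	unsigned int calculated_xstate_size;

	/* Standard (non-compacted) format, i.e. the !XSAVES case above:
	 * CPUID.(EAX=0DH, ECX=0):EBX = bytes needed for the enabled
	 * user xfeatures. */
	cpuid_count(0x0000000d, 0, &eax, &ebx, &ecx, &edx);
	calculated_xstate_size = ebx;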