author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-23 10:59:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-23 10:59:07 -0700
commit     ec0d7f18ab7b5097d7c0c8f3d909ca1031b9d5cd (patch)
tree       7d62c924592145f819ecaa5d60460a05a10dfdbd /arch/x86/kernel
parent     269af9a1a08d368b46d72e74126564d04c354f7e (diff)
parent     1dcc8d7ba235a316a056f993e88f0d18b92c60d9 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull fpu state cleanups from Ingo Molnar:
"This tree streamlines further aspects of FPU handling by eliminating
the prepare_to_copy() complication and moving that logic to
arch_dup_task_struct().
It also fixes the FPU dumps in threaded core dumps, removes an old
(and now invalid) assumption plus micro-optimizes the exit path by
avoiding an FPU save for dead tasks."
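
(For reference, the "avoiding an FPU save for dead tasks" part is the new drop_fpu() helper; the snippet below is condensed from the arch/x86/kernel/process.c hunks in the diff further down, with unrelated context omitted.)

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

exit_thread() now ends with a drop_fpu(me) call, so a dying task simply discards its FPU state instead of saving it via unlazy_fpu().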
Fixed up trivial add-add conflict in arch/sh/kernel/process.c that came
in because we now do the FPU handling in arch_dup_task_struct() rather
than the legacy (and now gone) prepare_to_copy().
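
(Concretely, the unlazy_fpu() call that prepare_to_copy() used to make now happens at the top of arch_dup_task_struct(); the sketch below is condensed from the arch/x86/kernel/process.c hunk in the diff further down, with the FPU allocation/copy path elided.)

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	unlazy_fpu(src);	/* formerly done by prepare_to_copy() */

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		/* ... FPU state allocation and copy, elided here ... */
	}
	/* ... */
}

This is why the identical 32-bit and 64-bit prepare_to_copy() definitions could simply be deleted, as the diff below shows.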
* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, fpu: drop the fpu state during thread exit
x86, xsave: remove thread_has_fpu() bug check in __sanitize_i387_state()
coredump: ensure the fpu state is flushed for proper multi-threaded core dump
fork: move the real prepare_to_copy() users to arch_dup_task_struct()
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/process.c    | 25
-rw-r--r--   arch/x86/kernel/process_32.c |  9
-rw-r--r--   arch/x86/kernel/process_64.c |  9
-rw-r--r--   arch/x86/kernel/xsave.c      |  2
4 files changed, 19 insertions, 26 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f2e8c0f951d6..735279e54e59 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -56,10 +56,16 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
+/*
+ * this gets called so that we can store lazy state into memory and copy the
+ * current task into the new thread.
+ */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	int ret;
 
+	unlazy_fpu(src);
+
 	*dst = *src;
 	if (fpu_allocated(&src->thread.fpu)) {
 		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
@@ -89,6 +95,16 @@ void arch_task_cache_init(void)
 				  SLAB_PANIC | SLAB_NOTRACK, NULL);
 }
 
+static inline void drop_fpu(struct task_struct *tsk)
+{
+	/*
+	 * Forget coprocessor state..
+	 */
+	tsk->fpu_counter = 0;
+	clear_fpu(tsk);
+	clear_used_math();
+}
+
 /*
  * Free current thread data structures etc..
  */
@@ -111,6 +127,8 @@ void exit_thread(void)
 		put_cpu();
 		kfree(bp);
 	}
+
+	drop_fpu(me);
 }
 
 void show_regs_common(void)
@@ -145,12 +163,7 @@ void flush_thread(void)
 
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-	/*
-	 * Forget coprocessor state..
-	 */
-	tsk->fpu_counter = 0;
-	clear_fpu(tsk);
-	clear_used_math();
+	drop_fpu(tsk);
 }
 
 static void hard_disable_TSC(void)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 01d8d40ccaf6..516fa186121b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -126,15 +126,6 @@ void release_thread(struct task_struct *dead_task)
 	release_vm86_irqs(dead_task);
 }
 
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
-	unlazy_fpu(tsk);
-}
-
 int copy_thread(unsigned long clone_flags, unsigned long sp,
 	unsigned long unused,
 	struct task_struct *p, struct pt_regs *regs)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 28e810255a0a..61cdf7fdf099 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -145,15 +145,6 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
 	return get_desc_base(&t->thread.tls_array[tls]);
 }
 
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
-	unlazy_fpu(tsk);
-}
-
 int copy_thread(unsigned long clone_flags, unsigned long sp,
 	unsigned long unused,
 	struct task_struct *p, struct pt_regs *regs)
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index e62728e30b01..bd18149b2b0f 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -48,8 +48,6 @@ void __sanitize_i387_state(struct task_struct *tsk)
 	if (!fx)
 		return;
 
-	BUG_ON(__thread_has_fpu(tsk));
-
 	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
 
 	/*