author		Andy Lutomirski <luto@kernel.org>	2016-10-31 15:18:44 -0700
committer	Ingo Molnar <mingo@kernel.org>		2016-11-01 07:47:54 +0100
commit		5a83d60c074ddf4f6364be25654a643d0e941824
tree		b4fea81759dc1bca3ba7eab08027d5ed7f243ffa /drivers/crypto/padlock-sha.c
parent		fc560a80bac91e512fc37cdfe03a982ef4543c6b
x86/fpu: Remove irq_ts_save() and irq_ts_restore()
Now that lazy FPU is gone, we don't use CR0.TS (except possibly in
KVM guest mode). Remove irq_ts_save(), irq_ts_restore(), and all of
their callers.
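For context, the helpers being removed saved and restored CR0.TS around the PadLock instructions so that an interrupt-context user would not take a spurious Device-Not-Available (DNA, #NM) fault. A minimal sketch approximating their old x86 definitions (reconstructed from the pre-4.10 headers, not copied verbatim):

```c
#include <linux/hardirq.h>		/* in_interrupt() */
#include <asm/special_insns.h>		/* read_cr0(), clts(), stts() */
#include <asm/processor-flags.h>	/* X86_CR0_TS */

/*
 * Sketch of the removed helpers. With lazy FPU gone, CR0.TS is never
 * set while the kernel is running, so both effectively became no-ops.
 */
static inline int irq_ts_save(void)
{
	/* Process context can tolerate a spurious DNA fault. */
	if (!in_interrupt())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();		/* clear TS: FPU-touching insns won't fault */
		return 1;
	}
	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();		/* re-arm TS for the lazy-FPU machinery */
}
```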
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm list <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/70b9b9e7ba70659bedcb08aba63d0f9214f338f2.1477951965.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/crypto/padlock-sha.c')
-rw-r--r--	drivers/crypto/padlock-sha.c	18
1 file changed, 0 insertions, 18 deletions
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	struct sha1_state state;
 	unsigned int space;
 	unsigned int leftover;
-	int ts_state;
 	int err;
 
 	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 
 	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
 
-	/* prevent taking the spurious DNA fault with padlock. */
-	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
-	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
 
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	struct sha256_state state;
 	unsigned int space;
 	unsigned int leftover;
-	int ts_state;
 	int err;
 
 	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 
 	memcpy(result, &state.state, SHA256_DIGEST_SIZE);
 
-	/* prevent taking the spurious DNA fault with padlock. */
-	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
-	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
 
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
 	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
 		((aligned(STACK_ALIGN)));
 	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	int ts_state;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
 			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
 			src = sctx->buffer;
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst) \
			: "a"((long)-1), "c"((unsigned long)1));
-			irq_ts_restore(ts_state);
 			done += SHA1_BLOCK_SIZE;
 			src = data + done;
 		}
 
 		/* Process the left bytes from the input data */
 		if (len - done >= SHA1_BLOCK_SIZE) {
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
-			irq_ts_restore(ts_state);
 			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
 			src = data + done;
 		}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
 	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
 		((aligned(STACK_ALIGN)));
 	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	int ts_state;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
 			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
 			src = sctx->buf;
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
-			irq_ts_restore(ts_state);
 			done += SHA256_BLOCK_SIZE;
 			src = data + done;
 		}
 
 		/* Process the left bytes from input data*/
 		if (len - done >= SHA256_BLOCK_SIZE) {
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / 64)));
-			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % 64);
			src = data + done;
		}
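For readers unfamiliar with the PadLock SHA extension, here is a hedged user-space transliteration of the finalize step above. Only the opcode bytes and the register roles are taken from the diff; the helper name `xsha1_oneshot`, the from-scratch count of 0, and the defensive clobber list are illustrative assumptions, and it naturally runs only on a VIA CPU with PadLock SHA enabled:

```c
#include <stdint.h>
#include <string.h>

#define PADLOCK_ALIGNMENT 16	/* PadLock wants a 16-byte-aligned state buffer */

/*
 * Hypothetical helper: SHA-1 of a whole buffer via "rep xsha1"
 * (opcode 0xf3 0x0f 0xa6 0xc8), mirroring padlock_sha1_finup() above.
 * Register contract as the driver uses it:
 *   EDI -> aligned scratch area seeded with the SHA-1 IV, receives digest
 *   ESI -> input data
 *   EAX =  bytes already hashed (0 here: hash from scratch)
 *   ECX =  total message length in bytes (triggers final padding)
 */
static void xsha1_oneshot(const uint8_t *in, unsigned long len,
			  uint8_t digest[20])
{
	/* 128-byte scratch area, matching the driver's buffer size. */
	uint32_t state[128 / 4] __attribute__((aligned(PADLOCK_ALIGNMENT))) = {
		0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0,
	};
	const uint8_t *src = in;
	uint32_t *dst = state;
	unsigned long count = 0, total = len;

	/*
	 * Unlike the kernel driver, mark ESI/EDI/EAX/ECX as potentially
	 * modified and add a "memory" clobber: defensive assumptions,
	 * since the architectural clobbers are not spelled out in the diff.
	 */
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"	/* rep xsha1 */
		      : "+S" (src), "+D" (dst),
			"+a" (count), "+c" (total)
		      :
		      : "memory");

	memcpy(digest, state, 20);
}
```

The nano-specific paths above use the same opcodes with EAX = -1 and a block count in ECX, which the driver relies on for intermediate (non-finalizing) updates.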