author    Evan Cheng <evan.cheng@apple.com>  2011-01-07 19:35:30 +0000
committer Evan Cheng <evan.cheng@apple.com>  2011-01-07 19:35:30 +0000
commit    a5e1362f968568d66d76ddcdcff4ab98e203a48c (patch)
tree      53e266c315432b49be8ad6f3a2d2a5873265ab53 /test/CodeGen/X86/tlv-1.ll
parent    1434f66b2e132a707e2c8ccb3350ea13fb5aa051 (diff)
Revert r122955. It seems using movups to lower memcpy can cause massive regressions (even on Nehalem) in edge cases. I also didn't see any real performance benefit.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123015 91177308-0d34-0410-b5e6-96231b3b80d8
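
For context, the CHECK lines removed below encode the lowering this change reverts: with r122955 applied, the memset in this test (60 bytes, as the store offsets imply) was expanded into unaligned 16-byte SSE stores from a zeroed XMM register, plus scalar stores for the tail. Taken directly from the deleted CHECK-NEXT lines:

    pxor   %xmm0, %xmm0        ; zero a 16-byte XMM register
    movups %xmm0, 32(%rax)     ; three unaligned 16-byte stores zero bytes 0-47
    movups %xmm0, 16(%rax)
    movups %xmm0, (%rax)
    movl   $0, 56(%rax)        ; scalar stores zero the remaining bytes 48-59
    movq   $0, 48(%rax)

After the revert the expansion uses scalar stores throughout; the updated test checks only the first two stores emitted after the call, and presumably further movq stores (not checked) zero the rest of the struct.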
Diffstat (limited to 'test/CodeGen/X86/tlv-1.ll')
-rw-r--r--  test/CodeGen/X86/tlv-1.ll  8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/test/CodeGen/X86/tlv-1.ll b/test/CodeGen/X86/tlv-1.ll
index f9be15d1a95..42940f147ed 100644
--- a/test/CodeGen/X86/tlv-1.ll
+++ b/test/CodeGen/X86/tlv-1.ll
@@ -10,12 +10,8 @@ entry:
   unreachable
   ; CHECK: movq _c@TLVP(%rip), %rdi
   ; CHECK-NEXT: callq *(%rdi)
-  ; CHECK-NEXT: pxor %xmm0, %xmm0
-  ; CHECK-NEXT: movups %xmm0, 32(%rax)
-  ; CHECK-NEXT: movups %xmm0, 16(%rax)
-  ; CHECK-NEXT: movups %xmm0, (%rax)
-  ; CHECK-NEXT: movl $0, 56(%rax)
-  ; CHECK-NEXT: movq $0, 48(%rax)
+  ; CHECK-NEXT: movl $0, 56(%rax)
+  ; CHECK-NEXT: movq $0, 48(%rax)
 }
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
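
The declare above gives the prototype of the memset intrinsic the test exercises. For reference, here is a minimal sketch of the kind of test body these CHECK lines run against; it is a reconstruction, not taken from this commit, and the RUN line, struct layout, alignment, and function name are assumptions inferred from the 60-byte store pattern and the _c@TLVP reference:

    ; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
    ; Sketch only: names and layout are guesses consistent with the checks above.
    %struct.A = type { [48 x i8], i32, i32, i32 }   ; 48 + 4 + 4 + 4 = 60 bytes
    @c = external thread_local global %struct.A, align 4

    define void @main() nounwind {
    entry:
      ; Zeroing the thread-local struct: the TLV access produces the
      ; movq _c@TLVP(%rip), %rdi / callq *(%rdi) sequence, and the inline
      ; memset expansion produces the stores checked in the diff above.
      call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds (%struct.A* @c, i32 0, i32 0, i32 0), i8 0, i64 60, i32 4, i1 false)
      unreachable
    }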