diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2015-03-07 05:52:42 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2015-03-07 05:52:42 +0000 |
commit | 62ba058dea475a454a617295d3f63fb4a5f585b3 (patch) | |
tree | f10d4a90caf32b518c7152efdeb6da1e4c163eb5 /test | |
parent | b0b21de6278b967e302d44954d8d9f6235d233be (diff) |
[DAGCombiner] SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C)) -> VECTOR_SHUFFLE
This patch attempts to convert a SCALAR_TO_VECTOR node whose operand comes from an EXTRACT_VECTOR_ELT into a VECTOR_SHUFFLE.
This prevents many cases of spilling scalar data between the general-purpose (GPR) and SIMD register files.
At present the optimization only accepts cases where there is no truncation (TRUNC) of the scalar type, i.e. all types must match.
Differential Revision: http://reviews.llvm.org/D8132
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231554 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/X86/mmx-arg-passing-x86-64.ll | 6 | ||||
-rw-r--r-- | test/CodeGen/X86/pr14161.ll | 5 |
2 files changed, 4 insertions, 7 deletions
diff --git a/test/CodeGen/X86/mmx-arg-passing-x86-64.ll b/test/CodeGen/X86/mmx-arg-passing-x86-64.ll index 89eb33e741a..36ccfe91866 100644 --- a/test/CodeGen/X86/mmx-arg-passing-x86-64.ll +++ b/test/CodeGen/X86/mmx-arg-passing-x86-64.ll @@ -25,11 +25,9 @@ define void @t4(x86_mmx %v1, x86_mmx %v2) nounwind { ; X86-64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp) ; X86-64-NEXT: movdq2q %xmm0, %mm0 ; X86-64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp) -; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; X86-64-NEXT: paddb %xmm0, %xmm1 -; X86-64-NEXT: movd %xmm1, %rax -; X86-64-NEXT: movd %rax, %xmm0 +; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-64-NEXT: paddb %xmm1, %xmm0 ; X86-64-NEXT: movb $1, %al ; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL %v1a = bitcast x86_mmx %v1 to <8 x i8> diff --git a/test/CodeGen/X86/pr14161.ll b/test/CodeGen/X86/pr14161.ll index b7084c02274..95c71405bc9 100644 --- a/test/CodeGen/X86/pr14161.ll +++ b/test/CodeGen/X86/pr14161.ll @@ -26,9 +26,8 @@ define <2 x i16> @bad(<4 x i32>*, <4 x i8>*) { ; CHECK: # BB#0: # %entry ; CHECK-NEXT: movdqa (%rdi), %xmm0 ; CHECK-NEXT: pminud {{.*}}(%rip), %xmm0 -; CHECK-NEXT: pextrd $1, %xmm0, %eax -; CHECK-NEXT: movd %eax, %xmm0 -; CHECK-NEXT: pmovzxwq %xmm0, %xmm0 +; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; CHECK-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero ; CHECK-NEXT: retq entry: %2 = load <4 x i32>, <4 x i32>* %0, align 16 |