author    Simon Pilgrim <llvm-dev@redking.me.uk>    2016-07-10 20:19:56 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>    2016-07-10 20:19:56 +0000
commit    cc3ff76d51c8c54a09a2fb89220fe22828fe8704 (patch)
tree      dbaf70755ff4b0148ef93044d0b4b41fbde9756d
parent    25fcf3e4e52432483f316f56f51d729c23072d12 (diff)
[X86][SSE] Added tests for combining shuffles to PSHUFLW/PSHUFHW
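
PSHUFLW/PSHUFHW permute the four 16-bit words in the low/high half of
each 128-bit lane with a single immediate, so a PSHUFB (or VPERMW) whose
mask only rearranges whole words within one half of a lane can be
rebuilt as the cheaper fixed word shuffle. The CHECK lines below capture
current codegen (vpshufb/vpermw) as a baseline for that combine. A
minimal sketch of the target pattern in generic IR (function name
illustrative, not part of these tests):

; Word shuffle confined to elements 0-3 of a <8 x i16>; this lowers to
; a single pshuflw with immediate 0xB1 (words [1,0,3,2]), leaving the
; high words 4-7 untouched.
define <8 x i16> @pshuflw_sketch(<8 x i16> %a0) {
  %res = shufflevector <8 x i16> %a0, <8 x i16> undef,
         <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %res
}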
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@275019 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  test/CodeGen/X86/vector-shuffle-combining-avx2.ll      | 28
-rw-r--r--  test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll  | 33
-rw-r--r--  test/CodeGen/X86/vector-shuffle-combining-ssse3.ll     | 43
3 files changed, 104 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index a76c4a76a16..4d164cec3c9 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -225,3 +225,31 @@ define <32 x i8> @combine_pshufb_as_psrldq(<32 x i8> %a0) {
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
ret <32 x i8> %res0
}
+
+define <32 x i8> @combine_pshufb_as_pshuflw(<32 x i8> %a0) {
+; CHECK-LABEL: combine_pshufb_as_pshuflw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,8,9,10,11,12,13,14,15,18,19,16,17,22,23,20,21,24,25,26,27,28,29,30,31]
+; CHECK-NEXT: retq
+ %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
+ ret <32 x i8> %res0
+}
+
+define <32 x i8> @combine_pshufb_as_pshufhw(<32 x i8> %a0) {
+; CHECK-LABEL: combine_pshufb_as_pshufhw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,10,11,8,9,14,15,12,13,16,17,18,19,20,21,22,23,26,27,24,25,30,31,28,29]
+; CHECK-NEXT: retq
+ %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
+ ret <32 x i8> %res0
+}
+
+define <32 x i8> @combine_pshufb_as_pshufw(<32 x i8> %a0) {
+; CHECK-LABEL: combine_pshufb_as_pshufw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
+; CHECK-NEXT: retq
+ %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
+ %res1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %res0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
+ ret <32 x i8> %res1
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index 59f81851730..0f994abba11 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -472,3 +472,36 @@ define <64 x i8> @combine_pshufb_as_psrldq_mask(<64 x i8> %a0, i64 %m) {
%res0 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %a0, <64 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>, <64 x i8> zeroinitializer, i64 %m)
ret <64 x i8> %res0
}
+
+define <32 x i16> @combine_permvar_as_pshuflw(<32 x i16> %a0) {
+; CHECK-LABEL: combine_permvar_as_pshuflw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqu16 {{.*#+}} zmm1 = [1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %res0
+}
+
+define <32 x i16> @combine_pshufb_as_pshufhw(<32 x i16> %a0) {
+; CHECK-LABEL: combine_pshufb_as_pshufhw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqu16 {{.*#+}} zmm1 = [0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %res0
+}
+
+define <32 x i16> @combine_pshufb_as_pshufw(<32 x i16> %a0) {
+; CHECK-LABEL: combine_pshufb_as_pshufw:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovdqu16 {{.*#+}} zmm1 = [1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15,17,16,19,18,20,21,22,23,25,24,27,26,28,29,30,31]
+; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vmovdqu16 {{.*#+}} zmm1 = [0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14,16,17,18,19,21,20,23,22,24,25,26,27,29,28,31,30]
+; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res0 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 1, i16 0, i16 3, i16 2, i16 4, i16 5, i16 6, i16 7, i16 9, i16 8, i16 11, i16 10, i16 12, i16 13, i16 14, i16 15, i16 17, i16 16, i16 19, i16 18, i16 20, i16 21, i16 22, i16 23, i16 25, i16 24, i16 27, i16 26, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
+ %res1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %res0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 5, i16 4, i16 7, i16 6, i16 8, i16 9, i16 10, i16 11, i16 13, i16 12, i16 15, i16 14, i16 16, i16 17, i16 18, i16 19, i16 21, i16 20, i16 23, i16 22, i16 24, i16 25, i16 26, i16 27, i16 29, i16 28, i16 31, i16 30>, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %res1
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index 8ce96a4fdc7..7aedc0e2700 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -164,6 +164,49 @@ define <16 x i8> @combine_pshufb_as_psrldq(<16 x i8> %a0) {
ret <16 x i8> %res0
}

+define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_pshuflw:
+; SSE: # BB#0:
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,8,9,10,11,12,13,14,15]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_pshuflw:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,8,9,10,11,12,13,14,15]
+; AVX-NEXT: retq
+ %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
+ ret <16 x i8> %res0
+}
+
+define <16 x i8> @combine_pshufb_as_pshufhw(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_pshufhw:
+; SSE: # BB#0:
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,10,11,8,9,14,15,12,13]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_pshufhw:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,10,11,8,9,14,15,12,13]
+; AVX-NEXT: retq
+ %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
+ ret <16 x i8> %res0
+}
+
+define <16 x i8> @combine_pshufb_as_pshufw(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_pshufw:
+; SSE: # BB#0:
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_pshufw:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
+; AVX-NEXT: retq
+ %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
+ %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
+ ret <16 x i8> %res1
+}
+
define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpcklbw:
; SSE: # BB#0: