 test/CodeGen/X86/vector-zext.ll | 154 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 154 insertions(+)
diff --git a/test/CodeGen/X86/vector-zext.ll b/test/CodeGen/X86/vector-zext.ll
index afd7a24062a..cd09deee455 100644
--- a/test/CodeGen/X86/vector-zext.ll
+++ b/test/CodeGen/X86/vector-zext.ll
@@ -204,3 +204,157 @@ entry:
%t = zext <16 x i8> %z to <16 x i16>
ret <16 x i16> %t
}
+
+define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
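+; Without pmovzx, each half is zero-extended by interleaving the bytes with
+; themselves (punpcklbw/punpckhbw) and masking off the high byte of each word.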
+; SSE2-LABEL: load_zext_16i8_to_16i16:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: punpckhbw %xmm1, %xmm1 # xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+
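+; The SSSE3 sequence is identical to the SSE2 one.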
+; SSSE3-LABEL: load_zext_16i8_to_16i16:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: punpckhbw %xmm1, %xmm1 # xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+
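+; SSE4.1 zero-extends the low eight bytes directly with pmovzxbw; the high
+; eight bytes still go through the unpack-and-mask sequence.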
+; SSE41-LABEL: load_zext_16i8_to_16i16:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa (%rdi), %xmm1
+; SSE41-NEXT: pmovzxbw %xmm1, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: punpckhbw %xmm1, %xmm1 # xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT: pand %xmm2, %xmm1
+; SSE41-NEXT: retq
+
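+; AVX1 zero-extends the high half by unpacking against a zeroed register and
+; the low half with vpmovzxbw, then merges the two with vinsertf128.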
+; AVX1-LABEL: load_zext_16i8_to_16i16:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhbw %xmm1, %xmm0, %xmm1 # xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpmovzxbw %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+
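+; AVX2 folds the load and the extension into a single vpmovzxbw from memory.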
+; AVX2-LABEL: load_zext_16i8_to_16i16:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxbw (%rdi), %ymm0
+; AVX2-NEXT: retq
+entry:
+ %X = load <16 x i8>* %ptr
+ %Y = zext <16 x i8> %X to <16 x i16>
+ ret <16 x i16> %Y
+}
+
+define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
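+; Same pattern at word granularity: punpcklwd/punpckhwd duplicate each word
+; and the 0xFFFF mask clears the high word of every dword lane.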
+; SSE2-LABEL: load_zext_8i16_to_8i32:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+
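+; The SSSE3 sequence is identical to the SSE2 one.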
+; SSSE3-LABEL: load_zext_8i16_to_8i32:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: punpckhwd %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+
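+; pmovzxwd extends the low four words; the high four use unpack-and-mask.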
+; SSE41-LABEL: load_zext_8i16_to_8i32:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa (%rdi), %xmm1
+; SSE41-NEXT: pmovzxwd %xmm1, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: punpckhwd %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE41-NEXT: pand %xmm2, %xmm1
+; SSE41-NEXT: retq
+
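+; Unpack the high words against zero, vpmovzxwd the low words, and combine
+; the halves with vinsertf128.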
+; AVX1-LABEL: load_zext_8i16_to_8i32:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhwd %xmm1, %xmm0, %xmm1 # xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+
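+; A single vpmovzxwd from memory extends all eight words.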
+; AVX2-LABEL: load_zext_8i16_to_8i32:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxwd (%rdi), %ymm0
+; AVX2-NEXT: retq
+entry:
+ %X = load <8 x i16>* %ptr
+ %Y = zext <8 x i16> %X to <8 x i32>
+ ret <8 x i32> %Y
+}
+
+define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
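+; For dwords there is no self-interleave: pshufd moves each source element
+; into the low dword of a qword lane and the mask clears the upper dword.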
+; SSE2-LABEL: load_zext_4i32_to_4i64:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pshufd $-44, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd $-6, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+
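+; The SSSE3 sequence is identical to the SSE2 one.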
+; SSSE3-LABEL: load_zext_4i32_to_4i64:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pshufd $-44, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufd $-6, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+
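+; pmovzxdq extends the low two dwords; the high two use pshufd plus the mask.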
+; SSE41-LABEL: load_zext_4i32_to_4i64:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa (%rdi), %xmm1
+; SSE41-NEXT: pmovzxdq %xmm1, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pshufd $-6, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm1
+; SSE41-NEXT: retq
+
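+; vpunpckhdq against zero extends the high dwords and vpmovzxdq the low ones;
+; vinsertf128 assembles the ymm result.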
+; AVX1-LABEL: load_zext_4i32_to_4i64:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhdq %xmm1, %xmm0, %xmm1 # xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpmovzxdq %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+
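+; vpmovzxdq from memory loads and extends all four dwords in one instruction.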
+; AVX2-LABEL: load_zext_4i32_to_4i64:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxdq (%rdi), %ymm0
+; AVX2-NEXT: retq
+entry:
+ %X = load <4 x i32>* %ptr
+ %Y = zext <4 x i32> %X to <4 x i64>
+ ret <4 x i64> %Y
+}