| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-05-24 13:07:23 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-05-24 13:07:23 +0000 |
| commit | f36485f7ac2a8d72ad0e0f2134c17fd365272285 (patch) | |
| tree | 782c2c0418febbdd7471b53330d3dcadceb3c30a | |
| parent | 5ad80b222ac3d51038953f0b7d94d2ff1b49448d (diff) | |
[X86][SSE] Added vector sitofp/uitofp folded load tests
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@270558 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | test/CodeGen/X86/vec_int_to_fp.ll | 1641 |
1 file changed, 1641 insertions, 0 deletions
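All of the new tests share one shape: load an integer vector from memory, convert it with sitofp or uitofp, and return the result, so the backend gets a chance to fold the load into the conversion instruction. The CHECK blocks look machine-generated (presumably via LLVM's utils/update_llc_test_checks.py, though the diff itself doesn't say). One representative case, copied from the patch, with a comment added on the expected folding:

```llvm
; sitofp of a loaded <4 x i32>: the load folds into the conversion, so both
; the SSE and AVX checks in this patch expect a single cvtdq2ps that reads
; directly from memory, e.g. "cvtdq2ps (%rdi), %xmm0".
define <4 x float> @sitofp_load_4i32_to_4f32(<4 x i32> *%a) {
  %ld = load <4 x i32>, <4 x i32> *%a
  %cvt = sitofp <4 x i32> %ld to <4 x float>
  ret <4 x float> %cvt
}
```

Where no direct instruction exists, most notably uitofp from 64-bit elements, the expected code instead extracts each element to a GPR and converts it with cvtsi2ssq/cvtsi2sdq; for unsigned values with the sign bit set it converts a round-to-odd halved value ((x >> 1) | (x & 1)) and doubles the result with addss, which is why the uitofp_load_4i64_to_4f32 and uitofp_load_8i64_to_8f32 check blocks below are so long.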
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll index bc56ae7ae07..562884d8010 100644 --- a/test/CodeGen/X86/vec_int_to_fp.ll +++ b/test/CodeGen/X86/vec_int_to_fp.ll @@ -1802,6 +1802,1647 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) { } ; +; Load Signed Integer to Double +; + +define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) { +; SSE-LABEL: sitofp_load_2i64_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: cvtsi2sdq %rax, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2sdq %rax, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_2i64_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vmovdqa (%rdi), %xmm0 +; AVX-NEXT: vpextrq $1, %xmm0, %rax +; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 +; AVX-NEXT: vmovq %xmm0, %rax +; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm0 +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-NEXT: retq + %ld = load <2 x i64>, <2 x i64> *%a + %cvt = sitofp <2 x i64> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @sitofp_load_2i32_to_2f64(<2 x i32> *%a) { +; SSE-LABEL: sitofp_load_2i32_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvtdq2pd %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_2i32_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <2 x i32>, <2 x i32> *%a + %cvt = sitofp <2 x i32> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @sitofp_load_2i16_to_2f64(<2 x i16> *%a) { +; SSE-LABEL: sitofp_load_2i16_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $16, %xmm0 +; SSE-NEXT: cvtdq2pd %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_2i16_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxwq (%rdi), %xmm0 +; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <2 x i16>, <2 x i16> *%a + %cvt = sitofp <2 x i16> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @sitofp_load_2i8_to_2f64(<2 x i8> *%a) { +; SSE-LABEL: sitofp_load_2i8_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movzwl (%rdi), %eax +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $24, %xmm0 +; SSE-NEXT: cvtdq2pd %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_2i8_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxbq (%rdi), %xmm0 +; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <2 x i8>, <2 x i8> *%a + %cvt = sitofp <2 x i8> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) { +; SSE-LABEL: sitofp_load_4i64_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa 16(%rdi), %xmm2 +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: cvtsi2sdq %rax, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2sdq %rax, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: movd %xmm2, 
%rax +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2sdq %rax, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2sdq %rax, %xmm2 +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: retq +; +; AVX1-LABEL: sitofp_load_4i64_to_4f64: +; AVX1: # BB#0: +; AVX1-NEXT: vmovaps (%rdi), %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_load_4i64_to_4f64: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm0 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <4 x i64>, <4 x i64> *%a + %cvt = sitofp <4 x i64> %ld to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @sitofp_load_4i32_to_4f64(<4 x i32> *%a) { +; SSE-LABEL: sitofp_load_4i32_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_4i32_to_4f64: +; AVX: # BB#0: +; AVX-NEXT: vcvtdq2pd (%rdi), %ymm0 +; AVX-NEXT: retq + %ld = load <4 x i32>, <4 x i32> *%a + %cvt = sitofp <4 x i32> %ld to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @sitofp_load_4i16_to_4f64(<4 x i16> *%a) { +; SSE-LABEL: sitofp_load_4i16_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE-NEXT: psrad $16, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_4i16_to_4f64: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxwd (%rdi), %xmm0 +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %ld = load <4 x i16>, <4 x i16> *%a + %cvt = sitofp <4 x i16> %ld to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @sitofp_load_4i8_to_4f64(<4 x i8> *%a) { +; SSE-LABEL: sitofp_load_4i8_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE-NEXT: psrad $24, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_4i8_to_4f64: +; AVX: # 
BB#0: +; AVX-NEXT: vpmovsxbd (%rdi), %xmm0 +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %ld = load <4 x i8>, <4 x i8> *%a + %cvt = sitofp <4 x i8> %ld to <4 x double> + ret <4 x double> %cvt +} + +; +; Load Unsigned Integer to Double +; + +define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) { +; SSE-LABEL: uitofp_load_2i64_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; SSE-NEXT: subpd %xmm4, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE-NEXT: addpd %xmm1, %xmm0 +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE-NEXT: subpd %xmm4, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1] +; SSE-NEXT: addpd %xmm3, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_2i64_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vmovdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; AVX-NEXT: vsubpd %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vhaddpd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vsubpd %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0] +; AVX-NEXT: retq + %ld = load <2 x i64>, <2 x i64> *%a + %cvt = uitofp <2 x i64> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) { +; SSE-LABEL: uitofp_load_2i32_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; SSE-NEXT: subpd %xmm4, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE-NEXT: addpd %xmm1, %xmm0 +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE-NEXT: subpd %xmm4, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1] +; SSE-NEXT: addpd %xmm3, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_2i32_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; AVX-NEXT: vsubpd %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vhaddpd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vsubpd %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0] +; AVX-NEXT: retq + %ld = load <2 x i32>, <2 x i32> *%a + %cvt = uitofp <2 x i32> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) { +; SSE-LABEL: 
uitofp_load_2i16_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: cvtdq2pd %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_2i16_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <2 x i16>, <2 x i16> *%a + %cvt = uitofp <2 x i16> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) { +; SSE-LABEL: uitofp_load_2i8_to_2f64: +; SSE: # BB#0: +; SSE-NEXT: movzwl (%rdi), %eax +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: cvtdq2pd %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_2i8_to_2f64: +; AVX: # BB#0: +; AVX-NEXT: movzwl (%rdi), %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <2 x i8>, <2 x i8> *%a + %cvt = uitofp <2 x i8> %ld to <2 x double> + ret <2 x double> %cvt +} + +define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) { +; SSE-LABEL: uitofp_load_4i64_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa 16(%rdi), %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE-NEXT: movapd {{.*#+}} xmm5 = [4.503600e+15,1.934281e+25] +; SSE-NEXT: subpd %xmm5, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE-NEXT: addpd %xmm1, %xmm0 +; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE-NEXT: subpd %xmm5, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1] +; SSE-NEXT: addpd %xmm4, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE-NEXT: subpd %xmm5, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE-NEXT: addpd %xmm2, %xmm1 +; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE-NEXT: subpd %xmm5, %xmm4 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1] +; SSE-NEXT: addpd %xmm4, %xmm2 +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_4i64_to_4f64: +; AVX1: # BB#0: +; AVX1-NEXT: vmovaps (%rdi), %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX1-NEXT: vmovapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; AVX1-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX1-NEXT: vsubpd %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vhaddpd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vunpcklpd 
{{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX1-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX1-NEXT: vsubpd %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_4i64_to_4f64: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX2-NEXT: vmovapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; AVX2-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX2-NEXT: vsubpd %xmm4, %xmm1, %xmm1 +; AVX2-NEXT: vhaddpd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX2-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX2-NEXT: vsubpd %xmm4, %xmm0, %xmm0 +; AVX2-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0] +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <4 x i64>, <4 x i64> *%a + %cvt = uitofp <4 x i64> %ld to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) { +; SSE-LABEL: uitofp_load_4i32_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm2 +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: movdqa %xmm2, %xmm3 +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [1127219200,1160773632,0,0] +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE-NEXT: movapd {{.*#+}} xmm6 = [4.503600e+15,1.934281e+25] +; SSE-NEXT: subpd %xmm6, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] +; SSE-NEXT: addpd %xmm3, %xmm0 +; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] +; SSE-NEXT: subpd %xmm6, %xmm5 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1] +; SSE-NEXT: addpd %xmm5, %xmm3 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSE-NEXT: subpd %xmm6, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE-NEXT: addpd %xmm2, %xmm1 +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE-NEXT: subpd %xmm6, %xmm3 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] +; SSE-NEXT: addpd %xmm3, %xmm2 +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_4i32_to_4f64: +; AVX1: # BB#0: +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1 +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX1-NEXT: vmulpd 
{{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_4i32_to_4f64: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1 +; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1 +; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2 +; AVX2-NEXT: vmulpd %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2 +; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: retq + %ld = load <4 x i32>, <4 x i32> *%a + %cvt = uitofp <4 x i32> %ld to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @uitofp_load_4i16_to_4f64(<4 x i16> *%a) { +; SSE-LABEL: uitofp_load_4i16_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_4i16_to_4f64: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %ld = load <4 x i16>, <4 x i16> *%a + %cvt = uitofp <4 x i16> %ld to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @uitofp_load_4i8_to_4f64(<4 x i8> *%a) { +; SSE-LABEL: uitofp_load_4i8_to_4f64: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE-NEXT: pxor %xmm0, %xmm0 +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_4i8_to_4f64: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %ld = load <4 x i8>, <4 x i8> *%a + %cvt = uitofp <4 x i8> %ld to <4 x double> + ret <4 x double> %cvt +} + +; +; Load Signed Integer to Float +; + +define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) { +; SSE-LABEL: sitofp_load_4i64_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa 16(%rdi), %xmm2 +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: cvtsi2ssq %rax, %xmm3 +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: cvtsi2ssq %rax, %xmm0 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2ssq %rax, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2ssq %rax, %xmm1 +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE-NEXT: retq +; +; AVX1-LABEL: sitofp_load_4i64_to_4f32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovdqa (%rdi), %ymm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = 
xmm2[0],xmm1[0],xmm2[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_load_4i64_to_4f32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq + %ld = load <4 x i64>, <4 x i64> *%a + %cvt = sitofp <4 x i64> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @sitofp_load_4i32_to_4f32(<4 x i32> *%a) { +; SSE-LABEL: sitofp_load_4i32_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: cvtdq2ps (%rdi), %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_4i32_to_4f32: +; AVX: # BB#0: +; AVX-NEXT: vcvtdq2ps (%rdi), %xmm0 +; AVX-NEXT: retq + %ld = load <4 x i32>, <4 x i32> *%a + %cvt = sitofp <4 x i32> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @sitofp_load_4i16_to_4f32(<4 x i16> *%a) { +; SSE-LABEL: sitofp_load_4i16_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $16, %xmm0 +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_4i16_to_4f32: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxwd (%rdi), %xmm0 +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <4 x i16>, <4 x i16> *%a + %cvt = sitofp <4 x i16> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @sitofp_load_4i8_to_4f32(<4 x i8> *%a) { +; SSE-LABEL: sitofp_load_4i8_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $24, %xmm0 +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_4i8_to_4f32: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxbd (%rdi), %xmm0 +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <4 x i8>, <4 x i8> *%a + %cvt = sitofp <4 x i8> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) { +; SSE-LABEL: sitofp_load_8i64_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa 16(%rdi), %xmm2 +; SSE-NEXT: movdqa 32(%rdi), %xmm3 +; SSE-NEXT: movdqa 48(%rdi), %xmm4 +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: cvtsi2ssq %rax, %xmm5 +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: cvtsi2ssq %rax, %xmm0 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2ssq %rax, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm1 = 
xmm1[2,3,0,1] +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2ssq %rax, %xmm1 +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE-NEXT: movd %xmm4, %rax +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2ssq %rax, %xmm2 +; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2ssq %rax, %xmm1 +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1] +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2ssq %rax, %xmm2 +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: xorps %xmm3, %xmm3 +; SSE-NEXT: cvtsi2ssq %rax, %xmm3 +; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE-NEXT: retq +; +; AVX1-LABEL: sitofp_load_8i64_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovdqa (%rdi), %ymm0 +; AVX1-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_load_8i64_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX2-NEXT: 
vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0] +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i64>, <8 x i64> *%a + %cvt = sitofp <8 x i64> %ld to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @sitofp_load_8i32_to_8f32(<8 x i32> *%a) { +; SSE-LABEL: sitofp_load_8i32_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: cvtdq2ps (%rdi), %xmm0 +; SSE-NEXT: cvtdq2ps 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: sitofp_load_8i32_to_8f32: +; AVX: # BB#0: +; AVX-NEXT: vcvtdq2ps (%rdi), %ymm0 +; AVX-NEXT: retq + %ld = load <8 x i32>, <8 x i32> *%a + %cvt = sitofp <8 x i32> %ld to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) { +; SSE-LABEL: sitofp_load_8i16_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $16, %xmm0 +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $16, %xmm1 +; SSE-NEXT: cvtdq2ps %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: sitofp_load_8i16_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0 +; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_load_8i16_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0 +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i16>, <8 x i16> *%a + %cvt = sitofp <8 x i16> %ld to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) { +; SSE-LABEL: sitofp_load_8i8_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $24, %xmm0 +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3] +; SSE-NEXT: psrad $24, %xmm1 +; SSE-NEXT: cvtdq2ps %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: sitofp_load_8i8_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0 +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_load_8i8_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0 +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i8>, <8 x i8> *%a + %cvt = sitofp <8 x i8> %ld to <8 x float> + ret <8 x float> %cvt +} + +; +; Load Unsigned Integer to Float +; + +define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) { +; SSE-LABEL: uitofp_load_4i64_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa 16(%rdi), %xmm3 +; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB74_1 +; SSE-NEXT: # BB#2: +; SSE-NEXT: cvtsi2ssq %rax, %xmm2 +; SSE-NEXT: jmp .LBB74_3 +; SSE-NEXT: .LBB74_1: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm2 +; SSE-NEXT: addss %xmm2, %xmm2 +; SSE-NEXT: .LBB74_3: +; SSE-NEXT: movd %xmm1, %rax +; 
SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB74_4 +; SSE-NEXT: # BB#5: +; SSE-NEXT: cvtsi2ssq %rax, %xmm0 +; SSE-NEXT: jmp .LBB74_6 +; SSE-NEXT: .LBB74_4: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm0 +; SSE-NEXT: addss %xmm0, %xmm0 +; SSE-NEXT: .LBB74_6: +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB74_7 +; SSE-NEXT: # BB#8: +; SSE-NEXT: xorps %xmm3, %xmm3 +; SSE-NEXT: cvtsi2ssq %rax, %xmm3 +; SSE-NEXT: jmp .LBB74_9 +; SSE-NEXT: .LBB74_7: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: xorps %xmm3, %xmm3 +; SSE-NEXT: cvtsi2ssq %rcx, %xmm3 +; SSE-NEXT: addss %xmm3, %xmm3 +; SSE-NEXT: .LBB74_9: +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB74_10 +; SSE-NEXT: # BB#11: +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2ssq %rax, %xmm1 +; SSE-NEXT: jmp .LBB74_12 +; SSE-NEXT: .LBB74_10: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: cvtsi2ssq %rcx, %xmm1 +; SSE-NEXT: addss %xmm1, %xmm1 +; SSE-NEXT: .LBB74_12: +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_4i64_to_4f32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovdqa (%rdi), %ymm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB74_1 +; AVX1-NEXT: # BB#2: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX1-NEXT: jmp .LBB74_3 +; AVX1-NEXT: .LBB74_1: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 +; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: .LBB74_3: +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB74_4 +; AVX1-NEXT: # BB#5: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: jmp .LBB74_6 +; AVX1-NEXT: .LBB74_4: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: .LBB74_6: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB74_7 +; AVX1-NEXT: # BB#8: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: jmp .LBB74_9 +; AVX1-NEXT: .LBB74_7: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: .LBB74_9: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB74_10 +; AVX1-NEXT: # BB#11: +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; AVX1-NEXT: .LBB74_10: +; AVX1-NEXT: 
shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 +; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_4i64_to_4f32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB74_1 +; AVX2-NEXT: # BB#2: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX2-NEXT: jmp .LBB74_3 +; AVX2-NEXT: .LBB74_1: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 +; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: .LBB74_3: +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB74_4 +; AVX2-NEXT: # BB#5: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: jmp .LBB74_6 +; AVX2-NEXT: .LBB74_4: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: .LBB74_6: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB74_7 +; AVX2-NEXT: # BB#8: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: jmp .LBB74_9 +; AVX2-NEXT: .LBB74_7: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: .LBB74_9: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB74_10 +; AVX2-NEXT: # BB#11: +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; AVX2-NEXT: .LBB74_10: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 +; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq + %ld = load <4 x i64>, <4 x i64> *%a + %cvt = uitofp <4 x i64> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) { +; SSE-LABEL: uitofp_load_4i32_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] +; SSE-NEXT: pand %xmm0, %xmm1 +; SSE-NEXT: por {{.*}}(%rip), %xmm1 +; SSE-NEXT: psrld $16, %xmm0 +; SSE-NEXT: por {{.*}}(%rip), %xmm0 +; SSE-NEXT: addps {{.*}}(%rip), %xmm0 +; SSE-NEXT: addps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_4i32_to_4f32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; AVX1-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_4i32_to_4f32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] +; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7] +; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 +; AVX2-NEXT: vaddps %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: retq + %ld = load <4 x i32>, <4 x i32> *%a + %cvt = uitofp <4 x i32> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @uitofp_load_4i16_to_4f32(<4 x i16> *%a) { +; SSE-LABEL: uitofp_load_4i16_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_4i16_to_4f32: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <4 x i16>, <4 x i16> *%a + %cvt = uitofp <4 x i16> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @uitofp_load_4i8_to_4f32(<4 x i8> *%a) { +; SSE-LABEL: uitofp_load_4i8_to_4f32: +; SSE: # BB#0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: uitofp_load_4i8_to_4f32: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %ld = load <4 x i8>, <4 x i8> *%a + %cvt = uitofp <4 x i8> %ld to <4 x float> + ret <4 x float> %cvt +} + +define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { +; SSE-LABEL: uitofp_load_8i64_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: movdqa 16(%rdi), %xmm5 +; SSE-NEXT: movdqa 32(%rdi), %xmm2 +; SSE-NEXT: movdqa 48(%rdi), %xmm3 +; SSE-NEXT: movd %xmm5, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_1 +; SSE-NEXT: # BB#2: +; SSE-NEXT: cvtsi2ssq %rax, %xmm4 +; SSE-NEXT: jmp .LBB78_3 +; SSE-NEXT: .LBB78_1: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm4 +; SSE-NEXT: addss %xmm4, %xmm4 +; SSE-NEXT: .LBB78_3: +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_4 +; SSE-NEXT: # BB#5: +; SSE-NEXT: cvtsi2ssq %rax, %xmm0 +; SSE-NEXT: jmp .LBB78_6 +; SSE-NEXT: .LBB78_4: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm0 +; SSE-NEXT: addss %xmm0, %xmm0 +; SSE-NEXT: .LBB78_6: +; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1] +; SSE-NEXT: movd %xmm5, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_7 +; SSE-NEXT: # BB#8: +; SSE-NEXT: cvtsi2ssq %rax, %xmm6 +; SSE-NEXT: jmp .LBB78_9 +; SSE-NEXT: .LBB78_7: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm6 +; SSE-NEXT: addss %xmm6, %xmm6 +; SSE-NEXT: 
.LBB78_9: +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE-NEXT: movd %xmm1, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_10 +; SSE-NEXT: # BB#11: +; SSE-NEXT: xorps %xmm5, %xmm5 +; SSE-NEXT: cvtsi2ssq %rax, %xmm5 +; SSE-NEXT: jmp .LBB78_12 +; SSE-NEXT: .LBB78_10: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm5 +; SSE-NEXT: addss %xmm5, %xmm5 +; SSE-NEXT: .LBB78_12: +; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_13 +; SSE-NEXT: # BB#14: +; SSE-NEXT: cvtsi2ssq %rax, %xmm7 +; SSE-NEXT: jmp .LBB78_15 +; SSE-NEXT: .LBB78_13: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm7 +; SSE-NEXT: addss %xmm7, %xmm7 +; SSE-NEXT: .LBB78_15: +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_16 +; SSE-NEXT: # BB#17: +; SSE-NEXT: cvtsi2ssq %rax, %xmm1 +; SSE-NEXT: jmp .LBB78_18 +; SSE-NEXT: .LBB78_16: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: cvtsi2ssq %rcx, %xmm1 +; SSE-NEXT: addss %xmm1, %xmm1 +; SSE-NEXT: .LBB78_18: +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; SSE-NEXT: movd %xmm3, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_19 +; SSE-NEXT: # BB#20: +; SSE-NEXT: xorps %xmm3, %xmm3 +; SSE-NEXT: cvtsi2ssq %rax, %xmm3 +; SSE-NEXT: jmp .LBB78_21 +; SSE-NEXT: .LBB78_19: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: xorps %xmm3, %xmm3 +; SSE-NEXT: cvtsi2ssq %rcx, %xmm3 +; SSE-NEXT: addss %xmm3, %xmm3 +; SSE-NEXT: .LBB78_21: +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE-NEXT: movd %xmm2, %rax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: andl $1, %ecx +; SSE-NEXT: testq %rax, %rax +; SSE-NEXT: js .LBB78_22 +; SSE-NEXT: # BB#23: +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2ssq %rax, %xmm2 +; SSE-NEXT: jmp .LBB78_24 +; SSE-NEXT: .LBB78_22: +; SSE-NEXT: shrq %rax +; SSE-NEXT: orq %rax, %rcx +; SSE-NEXT: xorps %xmm2, %xmm2 +; SSE-NEXT: cvtsi2ssq %rcx, %xmm2 +; SSE-NEXT: addss %xmm2, %xmm2 +; SSE-NEXT: .LBB78_24: +; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_8i64_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovdqa (%rdi), %ymm0 +; AVX1-NEXT: vmovdqa 32(%rdi), %ymm2 +; AVX1-NEXT: vpextrq $1, %xmm2, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_1 +; AVX1-NEXT: # BB#2: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX1-NEXT: jmp .LBB78_3 +; AVX1-NEXT: .LBB78_1: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 +; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: .LBB78_3: +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_4 +; AVX1-NEXT: # BB#5: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX1-NEXT: jmp .LBB78_6 +; AVX1-NEXT: .LBB78_4: +; 
AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm3 +; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: .LBB78_6: +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2 +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_7 +; AVX1-NEXT: # BB#8: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm4 +; AVX1-NEXT: jmp .LBB78_9 +; AVX1-NEXT: .LBB78_7: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm4 +; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: .LBB78_9: +; AVX1-NEXT: vpextrq $1, %xmm2, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_10 +; AVX1-NEXT: # BB#11: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: jmp .LBB78_12 +; AVX1-NEXT: .LBB78_10: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: .LBB78_12: +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_13 +; AVX1-NEXT: # BB#14: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm5 +; AVX1-NEXT: jmp .LBB78_15 +; AVX1-NEXT: .LBB78_13: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm5 +; AVX1-NEXT: vaddss %xmm5, %xmm5, %xmm5 +; AVX1-NEXT: .LBB78_15: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3] +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_16 +; AVX1-NEXT: # BB#17: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX1-NEXT: jmp .LBB78_18 +; AVX1-NEXT: .LBB78_16: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm3 +; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: .LBB78_18: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vmovq %xmm4, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_19 +; AVX1-NEXT: # BB#20: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm5 +; AVX1-NEXT: jmp .LBB78_21 +; AVX1-NEXT: .LBB78_19: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 +; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm5 +; AVX1-NEXT: .LBB78_21: +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0] +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3] +; AVX1-NEXT: vpextrq $1, %xmm4, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB78_22 +; AVX1-NEXT: # BB#23: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: jmp .LBB78_24 +; AVX1-NEXT: .LBB78_22: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: .LBB78_24: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_8i64_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq 
%rax, %rax +; AVX2-NEXT: js .LBB78_1 +; AVX2-NEXT: # BB#2: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX2-NEXT: jmp .LBB78_3 +; AVX2-NEXT: .LBB78_1: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 +; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: .LBB78_3: +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_4 +; AVX2-NEXT: # BB#5: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX2-NEXT: jmp .LBB78_6 +; AVX2-NEXT: .LBB78_4: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm3 +; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: .LBB78_6: +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_7 +; AVX2-NEXT: # BB#8: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm4 +; AVX2-NEXT: jmp .LBB78_9 +; AVX2-NEXT: .LBB78_7: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm4 +; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4 +; AVX2-NEXT: .LBB78_9: +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_10 +; AVX2-NEXT: # BB#11: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: jmp .LBB78_12 +; AVX2-NEXT: .LBB78_10: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: .LBB78_12: +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_13 +; AVX2-NEXT: # BB#14: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm5 +; AVX2-NEXT: jmp .LBB78_15 +; AVX2-NEXT: .LBB78_13: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm5 +; AVX2-NEXT: vaddss %xmm5, %xmm5, %xmm5 +; AVX2-NEXT: .LBB78_15: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3] +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_16 +; AVX2-NEXT: # BB#17: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm3 +; AVX2-NEXT: jmp .LBB78_18 +; AVX2-NEXT: .LBB78_16: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm3 +; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: .LBB78_18: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3] +; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4 +; AVX2-NEXT: vmovq %xmm4, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_19 +; AVX2-NEXT: # BB#20: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm5 +; AVX2-NEXT: jmp .LBB78_21 +; AVX2-NEXT: .LBB78_19: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 +; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm5 +; AVX2-NEXT: .LBB78_21: +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0] +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3] +; AVX2-NEXT: vpextrq $1, %xmm4, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB78_22 +; AVX2-NEXT: # BB#23: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: jmp 
.LBB78_24 +; AVX2-NEXT: .LBB78_22: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: .LBB78_24: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] +; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i64>, <8 x i64> *%a + %cvt = uitofp <8 x i64> %ld to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) { +; SSE-LABEL: uitofp_load_8i32_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm0 +; SSE-NEXT: movdqa 16(%rdi), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535] +; SSE-NEXT: movdqa %xmm0, %xmm3 +; SSE-NEXT: pand %xmm2, %xmm3 +; SSE-NEXT: movdqa {{.*#+}} xmm4 = [1258291200,1258291200,1258291200,1258291200] +; SSE-NEXT: por %xmm4, %xmm3 +; SSE-NEXT: psrld $16, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm5 = [1392508928,1392508928,1392508928,1392508928] +; SSE-NEXT: por %xmm5, %xmm0 +; SSE-NEXT: movaps {{.*#+}} xmm6 = [-5.497642e+11,-5.497642e+11,-5.497642e+11,-5.497642e+11] +; SSE-NEXT: addps %xmm6, %xmm0 +; SSE-NEXT: addps %xmm3, %xmm0 +; SSE-NEXT: pand %xmm1, %xmm2 +; SSE-NEXT: por %xmm4, %xmm2 +; SSE-NEXT: psrld $16, %xmm1 +; SSE-NEXT: por %xmm5, %xmm1 +; SSE-NEXT: addps %xmm6, %xmm1 +; SSE-NEXT: addps %xmm2, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_8i32_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vmovaps (%rdi), %ymm0 +; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm1 +; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1 +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_8i32_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1 +; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15] +; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2 +; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15] +; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 +; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i32>, <8 x i32> *%a + %cvt = uitofp <8 x i32> %ld to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @uitofp_load_8i16_to_8f32(<8 x i16> *%a) { +; SSE-LABEL: uitofp_load_8i16_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movdqa (%rdi), %xmm1 +; SSE-NEXT: pxor %xmm2, %xmm2 +; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE-NEXT: cvtdq2ps %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_8i16_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: 
retq +; +; AVX2-LABEL: uitofp_load_8i16_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i16>, <8 x i16> *%a + %cvt = uitofp <8 x i16> %ld to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) { +; SSE-LABEL: uitofp_load_8i8_to_8f32: +; SSE: # BB#0: +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: pxor %xmm2, %xmm2 +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE-NEXT: cvtdq2ps %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: uitofp_load_8i8_to_8f32: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_load_8i8_to_8f32: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %ld = load <8 x i8>, <8 x i8> *%a + %cvt = uitofp <8 x i8> %ld to <8 x float> + ret <8 x float> %cvt +} + +; ; Aggregates ; |
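For completeness: the RUN lines that drive the SSE/AVX1/AVX2 prefixes sit at the top of vec_int_to_fp.ll and are not part of this hunk. They presumably follow the file's usual per-attribute pattern; this is an illustrative sketch, not the verbatim lines:

```llvm
; (illustrative RUN lines -- the real ones live at the top of the file)
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
```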