author    Craig Topper <craig.topper@gmail.com>  2016-07-11 05:36:53 +0000
committer Craig Topper <craig.topper@gmail.com>  2016-07-11 05:36:53 +0000
commit    c7f78103d3b5581ee6decd5b5edebc949890e15c (patch)
tree      405469dfdc0bcd47ad5acf3c264345b815857c1b
parent    b6d6904481045bb31db0804692b413d78fc247b9 (diff)
[AVX512] Add support for 512-bit ANDN now that all-ones build vectors survive long enough to allow the matching.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@275046 91177308-0d34-0410-b5e6-96231b3b80d8
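
For context: the combine touched below rewrites and(x, xor(y, all-ones)) into
X86ISD::ANDNP(y, x), since XOR with an all-ones vector is a bitwise NOT. A
minimal standalone C++ sketch of the underlying identity (an illustration
only, not part of the patch; the names x and y are arbitrary):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t x = 0x00FF00FF00FF00FFULL;
  uint64_t y = 0x0F0F0F0F0F0F0F0FULL;
  // XOR with all-ones is bitwise NOT ...
  uint64_t not_y = y ^ ~0ULL;
  // ... so and(x, xor(y, all-ones)) is exactly andn(y, x) = ~y & x,
  // applied lane-wise when x and y are vectors.
  assert((x & not_y) == (~y & x));
  return 0;
}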
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp |  3 +-
-rw-r--r--  test/CodeGen/X86/avx512-logic.ll   | 66 ++++++++++
2 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 8cb528ec9e3..c2887706b6d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -27882,6 +27882,7 @@ static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
+      VT != MVT::v8i64 && VT != MVT::v16i32 &&
       VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
     return SDValue();
@@ -27897,7 +27898,7 @@ static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
   N01 = peekThroughBitcasts(N01);

-  // Either match a direct AllOnes for 128 and 256-bit vectors, or an
+  // Either match a direct AllOnes for 128, 256, and 512-bit vectors, or an
   // insert_subvector building a 256-bit AllOnes vector.
   if (!ISD::isBuildVectorAllOnes(N01.getNode())) {
     if (!VT.is256BitVector() || N01->getOpcode() != ISD::INSERT_SUBVECTOR)
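
As a user-level illustration (a hypothetical example, not from the patch):
with this change, writing the same pattern through AVX-512F intrinsics should
select a single 512-bit vpandnq rather than a separate NOT and AND, assuming
a compiler with this combine and an AVX-512F target (e.g. -mavx512f):

#include <immintrin.h>

// Computes a & ~b across eight 64-bit lanes. The xor-with-all-ones
// feeding the and is the DAG shape the combine above now matches
// for 512-bit vectors.
__m512i andn_v8i64(__m512i a, __m512i b) {
  __m512i all_ones = _mm512_set1_epi64(-1);
  return _mm512_and_epi64(a, _mm512_xor_epi64(b, all_ones));
}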
diff --git a/test/CodeGen/X86/avx512-logic.ll b/test/CodeGen/X86/avx512-logic.ll
index c973b706e8f..d085467868a 100644
--- a/test/CodeGen/X86/avx512-logic.ll
+++ b/test/CodeGen/X86/avx512-logic.ll
@@ -17,6 +17,22 @@ entry:
ret <16 x i32> %x
}
+define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
+; ALL-LABEL: vpandnd:
+; ALL: ## BB#0: ## %entry
+; ALL-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; ALL-NEXT: vpandnd %zmm0, %zmm1, %zmm0
+; ALL-NEXT: retq
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
+ i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %b2 = xor <16 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1,
+ i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %x = and <16 x i32> %a2, %b2
+ ret <16 x i32> %x
+}
+
define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vpord:
; ALL: ## BB#0: ## %entry
@@ -58,6 +74,20 @@ entry:
ret <8 x i64> %x
}
+define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
+; ALL-LABEL: vpandnq:
+; ALL: ## BB#0: ## %entry
+; ALL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; ALL-NEXT: vpandnq %zmm0, %zmm1, %zmm0
+; ALL-NEXT: retq
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %b2 = xor <8 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %x = and <8 x i64> %a2, %b2
+ ret <8 x i64> %x
+}
+
define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: vporq:
; ALL: ## BB#0: ## %entry
@@ -133,6 +163,25 @@ define <64 x i8> @and_v64i8(<64 x i8> %a, <64 x i8> %b) {
ret <64 x i8> %res
}
+define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
+; KNL-LABEL: andn_v64i8:
+; KNL: ## BB#0:
+; KNL-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; KNL-NEXT: vandnps %ymm1, %ymm3, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: andn_v64i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpandnq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %b2 = xor <64 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %res = and <64 x i8> %a, %b2
+ ret <64 x i8> %res
+}
+
define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
; KNL-LABEL: or_v64i8:
; KNL: ## BB#0:
@@ -178,6 +227,23 @@ define <32 x i16> @and_v32i16(<32 x i16> %a, <32 x i16> %b) {
ret <32 x i16> %res
}
+define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
+; KNL-LABEL: andn_v32i16:
+; KNL: ## BB#0:
+; KNL-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; KNL-NEXT: vandnps %ymm1, %ymm3, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: andn_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpandnq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %b2 = xor <32 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %res = and <32 x i16> %a, %b2
+ ret <32 x i16> %res
+}
+
define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
; KNL-LABEL: or_v32i16:
; KNL: ## BB#0: