author     Sanjay Patel <spatel@rotateright.com>    2015-08-19 21:27:27 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2015-08-19 21:27:27 +0000
commit     3b7c3d3fe900139560a98a94e03934e5471b99a6 (patch)
tree       7aa79b6e80111b7b660a456056930511bd90c69b
parent     d81980d640cb8fc9adaaa1e01d83db9eb164b149 (diff)
[x86] enable machine combiner reassociations for scalar double-precision min/max
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@245506 91177308-0d34-0410-b5e6-96231b3b80d8
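
What the new cases buy: for a chain such as min(min(x0/x1, x2), x3), the machine combiner can now compute min(x2, x3) independently of the divide, leaving only one min on the critical path after the divide; the SSE/AVX CHECK lines in the test diff below show exactly that schedule. The following is a minimal LLVM IR sketch of the double-precision pattern, assuming the relaxed-FP settings established by machine-combiner.ll's RUN lines (which are not shown in this diff); the function name @min_chain_sketch is illustrative only and not part of the patch.

; As written, %sel2 depends on %sel1, which depends on the fdiv (a serial chain).
; After reassociation, min(%x2, %x3) can execute while the fdiv is still in flight.
define double @min_chain_sketch(double %x0, double %x1, double %x2, double %x3) {
  %t0 = fdiv double %x0, %x1
  %cmp1 = fcmp olt double %x2, %t0
  %sel1 = select i1 %cmp1, double %x2, double %t0
  %cmp2 = fcmp olt double %x3, %sel1
  %sel2 = select i1 %cmp2, double %x3, double %sel1
  ret double %sel2
}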
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp      |  4
-rw-r--r--  test/CodeGen/X86/machine-combiner.ll | 48
2 files changed, 52 insertions, 0 deletions
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index d9ee8b3c6d0..3732823f167 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6394,9 +6394,13 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
   // Normal min/max instructions are not commutative because of NaN and signed
   // zero semantics, but these are. Thus, there's no need to check for global
   // relaxed math; the instructions themselves have the properties we need.
+  case X86::MAXCSDrr:
   case X86::MAXCSSrr:
+  case X86::MINCSDrr:
   case X86::MINCSSrr:
+  case X86::VMAXCSDrr:
   case X86::VMAXCSSrr:
+  case X86::VMINCSDrr:
   case X86::VMINCSSrr:
     return true;
   case X86::ADDPDrr:
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index 2335cc7f1a8..e9300b4c6e9 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -406,3 +406,51 @@ define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3
   ret float %sel2
 }
 
+; Verify that SSE and AVX scalar double-precision minimum ops are reassociated.
+
+define double @reassociate_mins_double(double %x0, double %x1, double %x2, double %x3) {
+; SSE-LABEL: reassociate_mins_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    divsd %xmm1, %xmm0
+; SSE-NEXT:    minsd %xmm3, %xmm2
+; SSE-NEXT:    minsd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_mins_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vminsd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fdiv double %x0, %x1
+  %cmp1 = fcmp olt double %x2, %t0
+  %sel1 = select i1 %cmp1, double %x2, double %t0
+  %cmp2 = fcmp olt double %x3, %sel1
+  %sel2 = select i1 %cmp2, double %x3, double %sel1
+  ret double %sel2
+}
+
+; Verify that SSE and AVX scalar double-precision maximum ops are reassociated.
+
+define double @reassociate_maxs_double(double %x0, double %x1, double %x2, double %x3) {
+; SSE-LABEL: reassociate_maxs_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    divsd %xmm1, %xmm0
+; SSE-NEXT:    maxsd %xmm3, %xmm2
+; SSE-NEXT:    maxsd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_maxs_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmaxsd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fdiv double %x0, %x1
+  %cmp1 = fcmp ogt double %x2, %t0
+  %sel1 = select i1 %cmp1, double %x2, double %t0
+  %cmp2 = fcmp ogt double %x3, %sel1
+  %sel2 = select i1 %cmp2, double %x3, double %sel1
+  ret double %sel2
+}
+
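
Why no global fast-math check is needed, restating the comment in the patch: plain MINSD/MAXSD return their second source operand whenever either input is NaN, and when the inputs are zeros of opposite sign, so operand order is visible in the result and the instructions are not commutative. The MINC*/MAXC* forms are only used where those cases may be disregarded, so they really are commutative and associative. For contrast, a strict scalar min in LLVM IR (illustrative only; @strict_min_sketch is not part of the patch) that must preserve that ordering and therefore cannot, in general, be commuted or reassociated:

define double @strict_min_sketch(double %a, double %b) {
  ; With no fast-math flags, this select must keep IEEE NaN and signed-zero
  ; behavior: if %b is NaN, the result is NaN; if %a is NaN, the result is %b.
  %cmp = fcmp olt double %a, %b
  %min = select i1 %cmp, double %a, double %b
  ret double %min
}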