author     Jingyue Wu <jingyue@google.com>    2015-04-23 20:00:04 +0000
committer  Jingyue Wu <jingyue@google.com>    2015-04-23 20:00:04 +0000
commit     12f341611ad74da19a6d2e00b78c4ace51b4f339 (patch)
tree       032f71f670842ec2d89b0a8890af897b00b2f9ff
parent     e32631cecd2d669867ec0ff91181891dea2e8fc1 (diff)
[NVPTX] run SeparateConstOffsetFromGEP before SLSR
Summary: We pick this order because SeparateConstOffsetFromGEP may create
more opportunities for SLSR.

Test Plan: reassociate-geps-and-slsr.ll; no performance regression on
internal benchmarks.

Reviewers: meheff

Subscribers: llvm-commits, jholewinski

Differential Revision: http://reviews.llvm.org/D9230

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@235632 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Target/NVPTX/NVPTXTargetMachine.cpp                                   10
-rw-r--r--  test/Transforms/StraightLineStrengthReduce/NVPTX/lit.local.cfg              2
-rw-r--r--  test/Transforms/StraightLineStrengthReduce/NVPTX/reassociate-geps-and-slsr.ll  74
3 files changed, 82 insertions, 4 deletions
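
To make the pass-ordering rationale from the summary concrete, here is a minimal hand-written example, reduced from the test added in this patch to two accesses; the function name @example and the reduction to two loads are illustrative only. It can be fed through opt -separate-const-offset-from-gep -slsr -gvn -S, the same pipeline as the test's first RUN line:

target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-unknown-unknown"

define float @example(float* %arr, i32 %i) {
  ; arr[i + 5]: with the +5 folded into the index, SLSR alone sees no
  ; common stride between this GEP and the next one.
  %j1 = add nsw i32 %i, 5
  %p1 = getelementptr inbounds float, float* %arr, i32 %j1
  %v1 = load float, float* %p1, align 4
  ; arr[i * 2 + 5]: SeparateConstOffsetFromGEP splits the +5 into a trailing
  ; constant offset, leaving bases &arr[i] and &arr[i * 2]; SLSR can then
  ; rewrite the second base as the first base plus i, which is why it is
  ; run after the reassociation pass.
  %i2 = shl nsw i32 %i, 1
  %j2 = add nsw i32 %i2, 5
  %p2 = getelementptr inbounds float, float* %arr, i32 %j2
  %v2 = load float, float* %p2, align 4
  %s = fadd float %v1, %v2
  ret float %s
}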
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index f9545cc1d4..dc8e8a2952 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -168,11 +168,13 @@ void NVPTXPassConfig::addIRPasses() {
// them unused. We could remove dead code in an ad-hoc manner, but that
// requires manual work and might be error-prone.
addPass(createDeadCodeEliminationPass());
- addPass(createStraightLineStrengthReducePass());
addPass(createSeparateConstOffsetFromGEPPass());
- // The SeparateConstOffsetFromGEP pass creates variadic bases that can be used
- // by multiple GEPs. Run GVN or EarlyCSE to really reuse them. GVN generates
- // significantly better code than EarlyCSE for some of our benchmarks.
+ // ReassociateGEPs exposes more opportunities for SLSR. See
+ // the example in reassociate-geps-and-slsr.ll.
+ addPass(createStraightLineStrengthReducePass());
+ // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
+ // or EarlyCSE can reuse. GVN generates significantly better code than
+ // EarlyCSE for some of our benchmarks.
if (getOptLevel() == CodeGenOpt::Aggressive)
addPass(createGVNPass());
else
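
Regarding the reuse the new comment refers to: after SeparateConstOffsetFromGEP and SLSR, every reassociated GEP indexes by the same widened value of %i, and the GVN (or EarlyCSE) run keeps a single copy of it. A rough sketch of the expected post-GVN shape, mirroring the CHECK lines in the test below; the names %bump and %b1..%b4 are placeholders rather than actual pass output, and the sext is the assumed form of the shared index:

  %bump = sext i32 %i to i64                          ; computed once, shared by all GEPs
  %b1 = getelementptr float, float* %arr, i64 %bump   ; &arr[i]
  %b2 = getelementptr float, float* %b1, i64 %bump    ; &arr[2 * i], reuses %bump
  %b3 = getelementptr float, float* %b2, i64 %bump    ; &arr[3 * i]
  %b4 = getelementptr float, float* %b3, i64 %bump    ; &arr[4 * i]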
diff --git a/test/Transforms/StraightLineStrengthReduce/NVPTX/lit.local.cfg b/test/Transforms/StraightLineStrengthReduce/NVPTX/lit.local.cfg
new file mode 100644
index 0000000000..2cb98eb371
--- /dev/null
+++ b/test/Transforms/StraightLineStrengthReduce/NVPTX/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'NVPTX' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/StraightLineStrengthReduce/NVPTX/reassociate-geps-and-slsr.ll b/test/Transforms/StraightLineStrengthReduce/NVPTX/reassociate-geps-and-slsr.ll
new file mode 100644
index 0000000000..03c0356eb5
--- /dev/null
+++ b/test/Transforms/StraightLineStrengthReduce/NVPTX/reassociate-geps-and-slsr.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -separate-const-offset-from-gep -slsr -gvn -S | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix=PTX
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+; arr[i + 5]
+; arr[i * 2 + 5]
+; arr[i * 3 + 5]
+; arr[i * 4 + 5]
+;
+; => reassociate-geps
+;
+; *(&arr[i] + 5)
+; *(&arr[i * 2] + 5)
+; *(&arr[i * 3] + 5)
+; *(&arr[i * 4] + 5)
+;
+; => slsr
+;
+; p1 = &arr[i]
+; *(p1 + 5)
+; p2 = p1 + i
+; *(p2 + 5)
+; p3 = p2 + i
+; *(p3 + 5)
+; p4 = p3 + i
+; *(p4 + 5)
+define void @slsr_after_reassociate_geps(float* %arr, i32 %i) {
+; CHECK-LABEL: @slsr_after_reassociate_geps(
+; PTX-LABEL: .visible .func slsr_after_reassociate_geps(
+; PTX: ld.param.u64 [[arr:%rd[0-9]+]], [slsr_after_reassociate_geps_param_0];
+; PTX: ld.param.u32 [[i:%r[0-9]+]], [slsr_after_reassociate_geps_param_1];
+ %i2 = shl nsw i32 %i, 1
+ %i3 = mul nsw i32 %i, 3
+ %i4 = shl nsw i32 %i, 2
+
+ %j1 = add nsw i32 %i, 5
+ %p1 = getelementptr inbounds float, float* %arr, i32 %j1
+; CHECK: [[b1:%[0-9]+]] = getelementptr float, float* %arr, i64 [[bump:%[0-9]+]]
+; PTX: mul.wide.s32 [[i4:%rd[0-9]+]], [[i]], 4;
+; PTX: add.s64 [[base1:%rd[0-9]+]], [[arr]], [[i4]];
+ %v1 = load float, float* %p1, align 4
+; PTX: ld.f32 {{%f[0-9]+}}, {{\[}}[[base1]]+20];
+ call void @foo(float %v1)
+
+ %j2 = add nsw i32 %i2, 5
+ %p2 = getelementptr inbounds float, float* %arr, i32 %j2
+; CHECK: [[b2:%[0-9]+]] = getelementptr float, float* [[b1]], i64 [[bump]]
+; PTX: add.s64 [[base2:%rd[0-9]+]], [[base1]], [[i4]];
+ %v2 = load float, float* %p2, align 4
+; PTX: ld.f32 {{%f[0-9]+}}, {{\[}}[[base2]]+20];
+ call void @foo(float %v2)
+
+ %j3 = add nsw i32 %i3, 5
+ %p3 = getelementptr inbounds float, float* %arr, i32 %j3
+; CHECK: [[b3:%[0-9]+]] = getelementptr float, float* [[b2]], i64 [[bump]]
+; PTX: add.s64 [[base3:%rd[0-9]+]], [[base2]], [[i4]];
+ %v3 = load float, float* %p3, align 4
+; PTX: ld.f32 {{%f[0-9]+}}, {{\[}}[[base3]]+20];
+ call void @foo(float %v3)
+
+ %j4 = add nsw i32 %i4, 5
+ %p4 = getelementptr inbounds float, float* %arr, i32 %j4
+; CHECK: [[b4:%[0-9]+]] = getelementptr float, float* [[b3]], i64 [[bump]]
+; PTX: add.s64 [[base4:%rd[0-9]+]], [[base3]], [[i4]];
+ %v4 = load float, float* %p4, align 4
+; PTX: ld.f32 {{%f[0-9]+}}, {{\[}}[[base4]]+20];
+ call void @foo(float %v4)
+
+ ret void
+}
+
+declare void @foo(float)