author     David Blaikie <dblaikie@gmail.com>   2015-02-27 21:17:42 +0000
committer  David Blaikie <dblaikie@gmail.com>   2015-02-27 21:17:42 +0000
commit     7c9c6ed761bf9d28c0c257a045b35781969136e0 (patch)
tree       508cac951011b10e2817eacecc1fa640bbdba51e /test/CodeGen/X86/atomic-dagsched.ll
parent     dc64962c8649964d13cc60b83c8c400d5ae7504a (diff)
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
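The rewrite the script performs can be seen directly in the diff below: the loaded type, previously implied by the type of the pointer operand, becomes an explicit first parameter of the load instruction. A minimal before/after sketch in textual LLVM IR, using a line taken from this very test:

    ; old form: the result type i64 is implied by the i64* pointer operand
    %0 = load i64* %b, align 8

    ; new form: the loaded type is spelled out explicitly, then the pointer
    %0 = load i64, i64* %b, align 8

Since the script reads IR on stdin and writes the migrated IR on stdout, a hypothetical invocation (the script file name is illustrative, not part of the commit) would be:

    python migrate-load.py < test/CodeGen/X86/atomic-dagsched.ll > atomic-dagsched.migrated.ll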
Diffstat (limited to 'test/CodeGen/X86/atomic-dagsched.ll')
-rw-r--r--  test/CodeGen/X86/atomic-dagsched.ll | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/test/CodeGen/X86/atomic-dagsched.ll b/test/CodeGen/X86/atomic-dagsched.ll
index 750b73d4823..97bb1afa47a 100644
--- a/test/CodeGen/X86/atomic-dagsched.ll
+++ b/test/CodeGen/X86/atomic-dagsched.ll
@@ -2,10 +2,10 @@
define void @test(i8** %a, i64* %b, i64 %c, i64 %d) nounwind {
entry:
- %ptrtoarg4 = load i8** %a, align 8
+ %ptrtoarg4 = load i8*, i8** %a, align 8
%brglist1 = getelementptr i8*, i8** %a, i64 1
- %ptrtoarg25 = load i8** %brglist1, align 8
- %0 = load i64* %b, align 8
+ %ptrtoarg25 = load i8*, i8** %brglist1, align 8
+ %0 = load i64, i64* %b, align 8
%1 = mul i64 %0, 4
%scevgep = getelementptr i8, i8* %ptrtoarg25, i64 %1
%2 = mul i64 %d, 4
@@ -18,8 +18,8 @@ loop.cond: ; preds = %test.exit, %entry
br i1 %3, label %return, label %loop
loop: ; preds = %loop.cond
- %4 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
- %5 = load i64* %4, align 8
+ %4 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %5 = load i64, i64* %4, align 8
%vector.size.i = ashr i64 %5, 3
%num.vector.wi.i = shl i64 %vector.size.i, 3
%6 = icmp eq i64 %vector.size.i, 0
@@ -36,7 +36,7 @@ vector_kernel_entry.i: ; preds = %vector_kernel_entry
%asr.iv = phi i64 [ %asr.iv.next, %vector_kernel_entry.i ], [ %vector.size.i, %dim_0_vector_pre_head.i ]
%8 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
%asr.iv911 = addrspacecast i8* %asr.iv9 to <8 x i32> addrspace(1)*
- %9 = load <8 x i32> addrspace(1)* %asr.iv911, align 4
+ %9 = load <8 x i32>, <8 x i32> addrspace(1)* %asr.iv911, align 4
%extract8vector_func.i = extractelement <8 x i32> %9, i32 0
%extract9vector_func.i = extractelement <8 x i32> %9, i32 1
%extract10vector_func.i = extractelement <8 x i32> %9, i32 2
@@ -65,8 +65,8 @@ scalarIf.i: ; preds = %vector_kernel_entry
br i1 %18, label %test.exit, label %dim_0_pre_head.i
dim_0_pre_head.i: ; preds = %scalarIf.i
- %19 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
- %20 = load i64* %19, align 8
+ %19 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %20 = load i64, i64* %19, align 8
%21 = trunc i64 %20 to i32
%22 = mul i64 %vector.size.i, 8
br label %scalar_kernel_entry.i
@@ -76,7 +76,7 @@ scalar_kernel_entry.i: ; preds = %scalar_kernel_entry
%23 = addrspacecast i8* %asr.iv6 to i32 addrspace(1)*
%24 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
%scevgep16 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
- %25 = load i32 addrspace(1)* %scevgep16, align 4
+ %25 = load i32, i32 addrspace(1)* %scevgep16, align 4
%26 = atomicrmw min i32 addrspace(1)* %24, i32 %25 seq_cst
%scevgep15 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
store i32 %21, i32 addrspace(1)* %scevgep15, align 4