summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Aaron Watry <awatry@gmail.com>  2013-06-27 14:20:27 -0500
committer Aaron Watry <awatry@gmail.com>  2013-06-27 14:20:27 -0500
commit  7a9b834e6e5d4e4bd49dceddf420c8b9347ea926 (patch)
tree    7f96693ecfab3236b439596e9e69cda2f7edc657
parent  67b83e165a273bac0fe20549adfe0ebbb5b1defb (diff)
libclc: Fix vload/vstore with 64-bit pointers
This was broken previously on Radeon SI.
-rw-r--r--  generic/lib/shared/vload_if.ll    52
-rw-r--r--  generic/lib/shared/vload_impl.ll  45
-rw-r--r--  generic/lib/shared/vstore_if.ll   50
-rw-r--r--  generic/lib/shared/vstore_impl.ll 35
4 files changed, 86 insertions(+), 96 deletions(-)
diff --git a/generic/lib/shared/vload_if.ll b/generic/lib/shared/vload_if.ll
index 2634d37..bda592b 100644
--- a/generic/lib/shared/vload_if.ll
+++ b/generic/lib/shared/vload_if.ll
@@ -1,60 +1,60 @@
;Start int global vload
-declare <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 %y)
-declare <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 %y)
-declare <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 %y)
-declare <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 %y)
-declare <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 %y)
-
-define <2 x i32> @__clc_vload2_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 %y)
+declare <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
+declare <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
+declare <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
+declare <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
+declare <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
+
+define <2 x i32> @__clc_vload2_int__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <2 x i32> %call
}
-define <3 x i32> @__clc_vload3_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 %y)
+define <3 x i32> @__clc_vload3_int__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <3 x i32> %call
}
-define <4 x i32> @__clc_vload4_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 %y)
+define <4 x i32> @__clc_vload4_int__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <4 x i32> %call
}
-define <8 x i32> @__clc_vload8_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 %y)
+define <8 x i32> @__clc_vload8_int__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <8 x i32> %call
}
-define <16 x i32> @__clc_vload16_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 %y)
+define <16 x i32> @__clc_vload16_int__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <16 x i32> %call
}
;Start uint global vload
-define <2 x i32> @__clc_vload2_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 %y)
+define <2 x i32> @__clc_vload2_uint__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <2 x i32> %call
}
-define <3 x i32> @__clc_vload3_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 %y)
+define <3 x i32> @__clc_vload3_uint__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <3 x i32> %call
}
-define <4 x i32> @__clc_vload4_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 %y)
+define <4 x i32> @__clc_vload4_uint__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <4 x i32> %call
}
-define <8 x i32> @__clc_vload8_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 %y)
+define <8 x i32> @__clc_vload8_uint__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <8 x i32> %call
}
-define <16 x i32> @__clc_vload16_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
- %call = call <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 %y)
+define <16 x i32> @__clc_vload16_uint__global(i32 %x, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %call = call <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 addrspace(1)* nocapture %addr)
ret <16 x i32> %call
}
diff --git a/generic/lib/shared/vload_impl.ll b/generic/lib/shared/vload_impl.ll
index ae719e0..1333aac 100644
--- a/generic/lib/shared/vload_impl.ll
+++ b/generic/lib/shared/vload_impl.ll
@@ -1,43 +1,38 @@
; This provides optimized implementations of vload4/8/16 for 32-bit int/uint
define <2 x i32> @__clc_vload2_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- %4 = load <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <2 x i32> %4
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <2 x i32> addrspace(1)*
+ %3 = load <2 x i32> addrspace(1)* %2, align 4, !tbaa !3
+ ret <2 x i32> %3
}
define <3 x i32> @__clc_vload3_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- %4 = load <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <3 x i32> %4
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <3 x i32> addrspace(1)*
+ %3 = load <3 x i32> addrspace(1)* %2, align 4, !tbaa !3
+ ret <3 x i32> %3
}
define <4 x i32> @__clc_vload4_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- %4 = load <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <4 x i32> %4
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <4 x i32> addrspace(1)*
+ %3 = load <4 x i32> addrspace(1)* %2, align 4, !tbaa !3
+ ret <4 x i32> %3
}
define <8 x i32> @__clc_vload8_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- %4 = load <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <8 x i32> %4
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <8 x i32> addrspace(1)*
+ %3 = load <8 x i32> addrspace(1)* %2, align 4, !tbaa !3
+ ret <8 x i32> %3
}
define <16 x i32> @__clc_vload16_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- %4 = load <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <16 x i32> %4
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <16 x i32> addrspace(1)*
+ %3 = load <16 x i32> addrspace(1)* %2, align 4, !tbaa !3
+ ret <16 x i32> %3
}
!1 = metadata !{metadata !"char", metadata !5}
diff --git a/generic/lib/shared/vstore_if.ll b/generic/lib/shared/vstore_if.ll
index 30eb552..382a8a8 100644
--- a/generic/lib/shared/vstore_if.ll
+++ b/generic/lib/shared/vstore_if.ll
@@ -1,59 +1,59 @@
;Start int global vstore
-declare void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y)
+declare void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
+declare void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
+declare void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
+declare void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
+declare void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
-define void @__clc_vstore2_int__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore2_int__global(<2 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore3_int__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore3_int__global(<3 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore4_int__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore4_int__global(<4 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore8_int__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore8_int__global(<8 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore16_int__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore16_int__global(<16 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
;Start uint global vstore
-define void @__clc_vstore2_uint__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore2_uint__global(<2 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore3_uint__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore3_uint__global(<3 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore4_uint__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore4_uint__global(<4 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore8_uint__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore8_uint__global(<8 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
}
-define void @__clc_vstore16_uint__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
- call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore16_uint__global(<16 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 addrspace(1)* nocapture %addr)
ret void
} \ No newline at end of file
diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll
index 3baab5e..8790a8f 100644
--- a/generic/lib/shared/vstore_impl.ll
+++ b/generic/lib/shared/vstore_impl.ll
@@ -1,42 +1,37 @@
; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint
define void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <2 x i32> addrspace(1)*
+ store <2 x i32> %vec, <2 x i32> addrspace(1)* %2, align 4, !tbaa !3
ret void
}
define void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <3 x i32> addrspace(1)*
+ store <3 x i32> %vec, <3 x i32> addrspace(1)* %2, align 4, !tbaa !3
ret void
}
define void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <4 x i32> addrspace(1)*
+ store <4 x i32> %vec, <4 x i32> addrspace(1)* %2, align 4, !tbaa !3
ret void
}
define void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <8 x i32> addrspace(1)*
+ store <8 x i32> %vec, <8 x i32> addrspace(1)* %2, align 4, !tbaa !3
ret void
}
define void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
+ %1 = getelementptr i32 addrspace(1)* %addr, i32 %offset
+ %2 = bitcast i32 addrspace(1)* %1 to <16 x i32> addrspace(1)*
+ store <16 x i32> %vec, <16 x i32> addrspace(1)* %2, align 4, !tbaa !3
ret void
}