author     Aaron Watry <awatry@gmail.com>    2013-05-14 23:25:59 -0500
committer  Aaron Watry <awatry@gmail.com>    2013-05-14 23:25:59 -0500
commit     fc784748d6b9023a3d0187414490af880f6e1c38 (patch)
tree       652b9df557bd0b9392370a8ff1443a6f3622cd70
parent     da254c8a43e8c189ddc33a2fd36ba0a1597d0247 (diff)
libclc: Optimize vstore4/8/16 to global for int/uint types.
R600 probably doesn't support v2/v3 stores, and it chokes on types that
aren't 32 bits in size. These caveats could/should change in the future;
for now, the non-optimized implementations for the other sizes/types are
left intact.
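
For illustration, a hypothetical kernel (not part of this commit) that takes
the new fast path: with this change, a vstore4 on a __global int pointer
resolves to the new overload and becomes a single <4 x i32> store instead of
four scalar stores.

    /* Hypothetical example; "write4" is an invented name. */
    kernel void write4(global int *out) {
        int4 v = (int4)(0, 1, 2, 3);
        vstore4(v, 0, out);  /* forwards to __clc_vstore4_int__global */
    }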
-rw-r--r--  generic/lib/SOURCES               |  2
-rw-r--r--  generic/lib/shared/vstore.cl      | 57
-rw-r--r--  generic/lib/shared/vstore_if.ll   | 64
-rw-r--r--  generic/lib/shared/vstore_impl.ll | 90
4 files changed, 211 insertions(+), 2 deletions(-)
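
The new files mirror the existing vload split: vstore.cl holds the
OpenCL-visible overloads, vstore_if.ll the linkable wrapper functions, and
vstore_impl.ll the actual vector stores. The call chain for one overload,
using names from the diff below:

    vstore4(int4, size_t, global int *)        [vstore.cl]
      -> __clc_vstore4_int__global(...)        [vstore_if.ll]
      -> __clc_vstore4_impl_int__global(...)   [vstore_impl.ll, one <4 x i32> store]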
diff --git a/generic/lib/SOURCES b/generic/lib/SOURCES
index 5e0abe1..c2da3d7 100644
--- a/generic/lib/SOURCES
+++ b/generic/lib/SOURCES
@@ -29,5 +29,7 @@ shared/vload.cl
 shared/vload_if.ll
 shared/vload_impl.ll
 shared/vstore.cl
+shared/vstore_if.ll
+shared/vstore_impl.ll
 workitem/get_global_id.cl
 workitem/get_global_size.cl
diff --git a/generic/lib/shared/vstore.cl b/generic/lib/shared/vstore.cl
index 522b1d6..5b84f47 100644
--- a/generic/lib/shared/vstore.cl
+++ b/generic/lib/shared/vstore.cl
@@ -34,13 +34,12 @@
 VSTORE_VECTORIZE(SCALAR_GENTYPE, __local) \
 VSTORE_VECTORIZE(SCALAR_GENTYPE, __global) \
 
+//int/uint are special... see below
 #define VSTORE_TYPES() \
     VSTORE_ADDR_SPACES(char) \
     VSTORE_ADDR_SPACES(uchar) \
     VSTORE_ADDR_SPACES(short) \
     VSTORE_ADDR_SPACES(ushort) \
-    VSTORE_ADDR_SPACES(int) \
-    VSTORE_ADDR_SPACES(uint) \
     VSTORE_ADDR_SPACES(long) \
     VSTORE_ADDR_SPACES(ulong) \
     VSTORE_ADDR_SPACES(float) \
@@ -52,3 +51,57 @@ VSTORE_TYPES()
     VSTORE_ADDR_SPACES(double)
 #endif
 
+VSTORE_VECTORIZE(int, __private)
+VSTORE_VECTORIZE(int, __local)
+VSTORE_VECTORIZE(uint, __private)
+VSTORE_VECTORIZE(uint, __local)
+
+_CLC_OVERLOAD _CLC_DEF void vstore2(int2 vec, size_t offset, global int *mem) {
+  mem[offset] = vec.s0;
+  mem[offset+1] = vec.s1;
+}
+_CLC_OVERLOAD _CLC_DEF void vstore3(int3 vec, size_t offset, global int *mem) {
+  mem[offset] = vec.s0;
+  mem[offset+1] = vec.s1;
+  mem[offset+2] = vec.s2;
+}
+_CLC_OVERLOAD _CLC_DEF void vstore2(uint2 vec, size_t offset, global uint *mem) {
+  mem[offset] = vec.s0;
+  mem[offset+1] = vec.s1;
+}
+_CLC_OVERLOAD _CLC_DEF void vstore3(uint3 vec, size_t offset, global uint *mem) {
+  mem[offset] = vec.s0;
+  mem[offset+1] = vec.s1;
+  mem[offset+2] = vec.s2;
+}
+
+/*Note: R600 probably doesn't support store <2 x ?> and <3 x ?>... so
+ * they aren't actually overridden here... lowest-common-denominator
+ */
+_CLC_DECL void __clc_vstore4_int__global(int4 vec, size_t offset, __global int *);
+_CLC_DECL void __clc_vstore8_int__global(int8 vec, size_t offset, __global int *);
+_CLC_DECL void __clc_vstore16_int__global(int16 vec, size_t offset, __global int *);
+
+_CLC_OVERLOAD _CLC_DEF void vstore4(int4 vec, size_t offset, global int *x) {
+  __clc_vstore4_int__global(vec, offset, x);
+}
+_CLC_OVERLOAD _CLC_DEF void vstore8(int8 vec, size_t offset, global int *x) {
+  __clc_vstore8_int__global(vec, offset, x);
+}
+_CLC_OVERLOAD _CLC_DEF void vstore16(int16 vec, size_t offset, global int *x) {
+  __clc_vstore16_int__global(vec, offset, x);
+}
+
+_CLC_DECL void __clc_vstore4_uint__global(uint4 vec, size_t offset, __global uint *);
+_CLC_DECL void __clc_vstore8_uint__global(uint8 vec, size_t offset, __global uint *);
+_CLC_DECL void __clc_vstore16_uint__global(uint16 vec, size_t offset, __global uint *);
+
+_CLC_OVERLOAD _CLC_DEF void vstore4(uint4 vec, size_t offset, global uint *x) {
+  __clc_vstore4_uint__global(vec, offset, x);
+}
+_CLC_OVERLOAD _CLC_DEF void vstore8(uint8 vec, size_t offset, global uint *x) {
+  __clc_vstore8_uint__global(vec, offset, x);
+}
+_CLC_OVERLOAD _CLC_DEF void vstore16(uint16 vec, size_t offset, global uint *x) {
+  __clc_vstore16_uint__global(vec, offset, x);
+}
diff --git a/generic/lib/shared/vstore_if.ll b/generic/lib/shared/vstore_if.ll
new file mode 100644
index 0000000..e98a126
--- /dev/null
+++ b/generic/lib/shared/vstore_if.ll
@@ -0,0 +1,64 @@
+;Start int global vstore
+
+declare void @__clc_vstore2_impl_int__global(<2 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore2_int__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore2_impl_int__global(<2 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore3_impl_int__global(<3 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore3_int__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore3_impl_int__global(<3 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore4_impl_int__global(<4 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore4_int__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore4_impl_int__global(<4 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore8_impl_int__global(<8 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore8_int__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore8_impl_int__global(<8 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore16_impl_int__global(<16 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore16_int__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore16_impl_int__global(<16 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+
+;Start uint global vstore
+
+declare void @__clc_vstore2_impl_uint__global(<2 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore2_uint__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore2_impl_uint__global(<2 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore3_impl_uint__global(<3 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore3_uint__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore3_impl_uint__global(<3 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore4_impl_uint__global(<4 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore4_uint__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore4_impl_uint__global(<4 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore8_impl_uint__global(<8 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore8_uint__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore8_impl_uint__global(<8 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
+
+declare void @__clc_vstore16_impl_uint__global(<16 x i32> %vec, i32 %x, i32 %y)
+define void @__clc_vstore16_uint__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
+  call void @__clc_vstore16_impl_uint__global(<16 x i32> %vec, i32 %x, i32 %y)
+  ret void
+}
\ No newline at end of file
diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll
new file mode 100644
index 0000000..40f5513
--- /dev/null
+++ b/generic/lib/shared/vstore_impl.ll
@@ -0,0 +1,90 @@
+; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint
+
+define void @__clc_vstore2_impl_int__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
+  store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore3_impl_int__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore4_impl_int__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
+  store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore8_impl_int__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
+  store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore16_impl_int__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
+  store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore2_impl_uint__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
+  store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore3_impl_uint__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore4_impl_uint__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
+  store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore8_impl_uint__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
+  store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore16_impl_uint__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
+  store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+
+!1 = metadata !{metadata !"char", metadata !5}
+!2 = metadata !{metadata !"short", metadata !5}
+!3 = metadata !{metadata !"int", metadata !5}
+!4 = metadata !{metadata !"long", metadata !5}
+!5 = metadata !{metadata !"omnipotent char", metadata !6}
+!6 = metadata !{metadata !"Simple C/C++ TBAA"}
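
For reference, the ptrtoint/add/inttoptr sequence in vstore_impl.ll folds the
offset argument into the pointer value as a raw byte offset on a 32-bit
pointer. A rough OpenCL C sketch of one impl function (illustrative only; the
actual implementation is the IR above, and "vstore4_impl_sketch" is an
invented name):

    void vstore4_impl_sketch(int4 vec, uint offset, global int *addr) {
        /* offset is added to the pointer value as bytes... */
        global int4 *dst = (global int4 *)((uintptr_t)addr + offset);
        /* ...then the whole vector is written with one <4 x i32> store */
        *dst = vec;
    }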