diff options
author | Aaron Watry <awatry@gmail.com> | 2013-05-23 19:49:43 -0500 |
---|---|---|
committer | Tom Stellard <thomas.stellard@amd.com> | 2013-05-24 16:10:23 -0700 |
commit | 265b4637effe3a4dbab56690e7a9e77b5bd6c3ce (patch) | |
tree | 885fc410ed72647061edebc9b7c5a02d63153510 /generic | |
parent | 2214095868e6b8027e3c3c16df1863b7d16eeb40 (diff) |
libclc: Add assembly versions of vstore for global [u]int4/8/16
The assembly should be generic, but at least currently R600 only supports
32-bit stores of [u]int1/4, and I believe that only global is well-supported.
R600 lowers the 8/16 component stores to multiple 4-component stores.
The unoptimized C versions of the other stuff are left in place.
Diffstat (limited to 'generic')
-rw-r--r-- | generic/lib/SOURCES | 2 | ||||
-rw-r--r-- | generic/lib/shared/vstore.cl | 63 | ||||
-rw-r--r-- | generic/lib/shared/vstore_if.ll | 59 | ||||
-rw-r--r-- | generic/lib/shared/vstore_impl.ll | 50 |
4 files changed, 168 insertions, 6 deletions
diff --git a/generic/lib/SOURCES b/generic/lib/SOURCES index 9f6acf3..8cda14a 100644 --- a/generic/lib/SOURCES +++ b/generic/lib/SOURCES @@ -27,5 +27,7 @@ shared/vload.cl shared/vload_if.ll shared/vload_impl.ll shared/vstore.cl +shared/vstore_if.ll +shared/vstore_impl.ll workitem/get_global_id.cl workitem/get_global_size.cl diff --git a/generic/lib/shared/vstore.cl b/generic/lib/shared/vstore.cl index e88ccc5..5b84f47 100644 --- a/generic/lib/shared/vstore.cl +++ b/generic/lib/shared/vstore.cl @@ -15,10 +15,8 @@ } \ \ _CLC_OVERLOAD _CLC_DEF void vstore4(PRIM_TYPE##4 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ - mem[offset] = vec.s0; \ - mem[offset+1] = vec.s1; \ - mem[offset+2] = vec.s2; \ - mem[offset+3] = vec.s3; \ + vstore2(vec.lo, offset, mem); \ + vstore2(vec.hi, offset+2, mem); \ } \ \ _CLC_OVERLOAD _CLC_DEF void vstore8(PRIM_TYPE##8 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ @@ -36,13 +34,12 @@ VSTORE_VECTORIZE(SCALAR_GENTYPE, __local) \ VSTORE_VECTORIZE(SCALAR_GENTYPE, __global) \ +//int/uint are special... 
see below #define VSTORE_TYPES() \ VSTORE_ADDR_SPACES(char) \ VSTORE_ADDR_SPACES(uchar) \ VSTORE_ADDR_SPACES(short) \ VSTORE_ADDR_SPACES(ushort) \ - VSTORE_ADDR_SPACES(int) \ - VSTORE_ADDR_SPACES(uint) \ VSTORE_ADDR_SPACES(long) \ VSTORE_ADDR_SPACES(ulong) \ VSTORE_ADDR_SPACES(float) \ @@ -54,3 +51,57 @@ VSTORE_TYPES() VSTORE_ADDR_SPACES(double) #endif +VSTORE_VECTORIZE(int, __private) +VSTORE_VECTORIZE(int, __local) +VSTORE_VECTORIZE(uint, __private) +VSTORE_VECTORIZE(uint, __local) + +_CLC_OVERLOAD _CLC_DEF void vstore2(int2 vec, size_t offset, global int *mem) { + mem[offset] = vec.s0; + mem[offset+1] = vec.s1; +} +_CLC_OVERLOAD _CLC_DEF void vstore3(int3 vec, size_t offset, global int *mem) { + mem[offset] = vec.s0; + mem[offset+1] = vec.s1; + mem[offset+2] = vec.s2; +} +_CLC_OVERLOAD _CLC_DEF void vstore2(uint2 vec, size_t offset, global uint *mem) { + mem[offset] = vec.s0; + mem[offset+1] = vec.s1; +} +_CLC_OVERLOAD _CLC_DEF void vstore3(uint3 vec, size_t offset, global uint *mem) { + mem[offset] = vec.s0; + mem[offset+1] = vec.s1; + mem[offset+2] = vec.s2; +} + +/*Note: R600 probably doesn't support store <2 x ?> and <3 x ?>... so + * they aren't actually overridden here... 
lowest-common-denominator + */ +_CLC_DECL void __clc_vstore4_int__global(int4 vec, size_t offset, __global int *); +_CLC_DECL void __clc_vstore8_int__global(int8 vec, size_t offset, __global int *); +_CLC_DECL void __clc_vstore16_int__global(int16 vec, size_t offset, __global int *); + +_CLC_OVERLOAD _CLC_DEF void vstore4(int4 vec, size_t offset, global int *x) { + __clc_vstore4_int__global(vec, offset, x); +} +_CLC_OVERLOAD _CLC_DEF void vstore8(int8 vec, size_t offset, global int *x) { + __clc_vstore8_int__global(vec, offset, x); +} +_CLC_OVERLOAD _CLC_DEF void vstore16(int16 vec, size_t offset, global int *x) { + __clc_vstore16_int__global(vec, offset, x); +} + +_CLC_DECL void __clc_vstore4_uint__global(uint4 vec, size_t offset, __global uint *); +_CLC_DECL void __clc_vstore8_uint__global(uint8 vec, size_t offset, __global uint *); +_CLC_DECL void __clc_vstore16_uint__global(uint16 vec, size_t offset, __global uint *); + +_CLC_OVERLOAD _CLC_DEF void vstore4(uint4 vec, size_t offset, global uint *x) { + __clc_vstore4_uint__global(vec, offset, x); +} +_CLC_OVERLOAD _CLC_DEF void vstore8(uint8 vec, size_t offset, global uint *x) { + __clc_vstore8_uint__global(vec, offset, x); +} +_CLC_OVERLOAD _CLC_DEF void vstore16(uint16 vec, size_t offset, global uint *x) { + __clc_vstore16_uint__global(vec, offset, x); +} diff --git a/generic/lib/shared/vstore_if.ll b/generic/lib/shared/vstore_if.ll new file mode 100644 index 0000000..30eb552 --- /dev/null +++ b/generic/lib/shared/vstore_if.ll @@ -0,0 +1,59 @@ +;Start int global vstore + +declare void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y) +declare void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y) +declare void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y) +declare void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y) +declare void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y) + +define void @__clc_vstore2_int__global(<2 x i32> 
%vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore3_int__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore4_int__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore8_int__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore16_int__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y) + ret void +} + + +;Start uint global vstore +define void @__clc_vstore2_uint__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore3_uint__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore4_uint__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore8_uint__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y) + ret void +} + +define void @__clc_vstore16_uint__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline { + call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y) + ret void +}
\ No newline at end of file diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll new file mode 100644 index 0000000..3baab5e --- /dev/null +++ b/generic/lib/shared/vstore_impl.ll @@ -0,0 +1,50 @@ +; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint + +define void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { + %1 = ptrtoint i32 addrspace(1)* %addr to i32 + %2 = add i32 %1, %offset + %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)* + store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3 + ret void +} + +define void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { + %1 = ptrtoint i32 addrspace(1)* %addr to i32 + %2 = add i32 %1, %offset + %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)* + store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3 + ret void +} + +define void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { + %1 = ptrtoint i32 addrspace(1)* %addr to i32 + %2 = add i32 %1, %offset + %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)* + store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3 + ret void +} + +define void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { + %1 = ptrtoint i32 addrspace(1)* %addr to i32 + %2 = add i32 %1, %offset + %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)* + store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3 + ret void +} + +define void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { + %1 = ptrtoint i32 addrspace(1)* %addr to i32 + %2 = add i32 %1, %offset + %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)* + store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 
4, !tbaa !3 + ret void +} + + +!1 = metadata !{metadata !"char", metadata !5} +!2 = metadata !{metadata !"short", metadata !5} +!3 = metadata !{metadata !"int", metadata !5} +!4 = metadata !{metadata !"long", metadata !5} +!5 = metadata !{metadata !"omnipotent char", metadata !6} +!6 = metadata !{metadata !"Simple C/C++ TBAA"} + |