author    Zhenyu Wang <zhenyuw@linux.intel.com>  2014-10-23 15:19:22 +0800
committer Zhigang Gong <zhigang.gong@intel.com>  2014-10-24 18:12:29 +0800
commit    8b0850587b80847056c9d798642fd8a7155fe3c3 (patch)
tree      e2eaae3f53d84ac8d344785290f49f55ee0372d6 /src/cl_command_queue.c
parent    e74164f951ba9003f92d14d685f184617da056eb (diff)
Make use of write enable flag for mem bo map
Use the drm/intel optimization for mem bo mapping in the read and write cases, so we can potentially wait less. This also adds a 'map_flags' check in clEnqueueMapBuffer/clEnqueueMapImage to determine whether the mapping is actually for read or for write. clMapBufferIntel is left untouched for now, since changing it might break the ABI/API.

v2: Fix the write_map flag in clEnqueueMapBuffer/clEnqueueMapImage.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: "Guo, Yejun" <yejun.guo@intel.com>
Reviewed-by: Zhigang Gong <zhigang.gong@linux.intel.com>
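As a rough sketch (not the actual Beignet source), the 'map_flags' check described above could translate the caller's write intent into the write-enable flag that cl_mem_map() now takes as its second argument (see the diff below). The helper name map_flags_want_write is hypothetical:

    #include <CL/cl.h>

    /* Hypothetical helper: report whether a clEnqueueMapBuffer/clEnqueueMapImage
     * request needs a writable mapping.  CL_MAP_WRITE_INVALIDATE_REGION is an
     * OpenCL 1.2 flag; read-only maps can then take the cheaper read path. */
    static int
    map_flags_want_write(cl_map_flags map_flags)
    {
      return (map_flags & (CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION)) != 0;
    }

    /* Usage sketch: to = cl_mem_map(mem, map_flags_want_write(map_flags)); */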
Diffstat (limited to 'src/cl_command_queue.c')
-rw-r--r--  src/cl_command_queue.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/cl_command_queue.c b/src/cl_command_queue.c
index 48deba05..d07774f0 100644
--- a/src/cl_command_queue.c
+++ b/src/cl_command_queue.c
@@ -336,7 +336,7 @@ cl_fulsim_read_all_surfaces(cl_command_queue queue, cl_kernel k)
assert(mem->bo);
chunk_n = cl_buffer_get_size(mem->bo) / chunk_sz;
chunk_remainder = cl_buffer_get_size(mem->bo) % chunk_sz;
- to = cl_mem_map(mem);
+ to = cl_mem_map(mem, 1);
for (j = 0; j < chunk_n; ++j) {
char name[256];
sprintf(name, "dump%03i.bmp", curr);