author     Junyan He <junyan.he@intel.com>  2016-04-13 16:06:05 +0800
committer  Junyan He <junyan.he@intel.com>  2016-04-13 16:06:05 +0800
commit     81e5f37f21497581d9ddfddd17fe2d27aa547157 (patch)
tree       d4c6d9b875f74becea645b89a5c4ea5a08ad11b0 /backend
parent     1e4a0762967aea9c1e53c79d880da06bf364983b (diff)
event
Diffstat (limited to 'backend')
-rw-r--r--  backend/src/driver/cl_gen_mem.cpp | 24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/backend/src/driver/cl_gen_mem.cpp b/backend/src/driver/cl_gen_mem.cpp
index cf35f06c..473a4ed0 100644
--- a/backend/src/driver/cl_gen_mem.cpp
+++ b/backend/src/driver/cl_gen_mem.cpp
@@ -267,11 +267,12 @@ cl_int GenEnqueueMapBuffer(cl_command_queue queue, cl_mem mem, void** ret_addr,
return CL_MEM_OBJECT_ALLOCATION_FAILURE;
}
+ GenGPUCommandQueue* gpuQueue = (GenGPUCommandQueue*)getGenCommandQueuePrivate(queue);
+ GBE_ASSERT(gpuQueue);
+
if (block) {
/* According to spec, when in block mode, we need to ensure all the
commands in queue are flushed. */
- GenGPUCommandQueue* gpuQueue = (GenGPUCommandQueue*)getGenCommandQueuePrivate(queue);
- GBE_ASSERT(gpuQueue);
gpuQueue->waitForFlush();
if (event_list) { // Need to wait for events.
@@ -296,6 +297,25 @@ cl_int GenEnqueueMapBuffer(cl_command_queue queue, cl_mem mem, void** ret_addr,
gpuQueue->setEventStatus(event_ret, CL_COMPLETE);
}
return CL_SUCCESS;
+ } else if (event_list == NULL) {
+ /* We do not have any events to wait, map it in sync mode. */
+ retAddr = genDoMapBuffer(genMem, mem, flags, offset, size);
+ if (retAddr == NULL) {
+ if (event_ret) {
+ gpuQueue->setEventStatus(event_ret, -1); // Set error for that event.
+ }
+ return CL_MAP_FAILURE;
+ }
+
+ if (ret_addr)
+ *ret_addr = retAddr;
+
+ if (event_ret) {
+ gpuQueue->setEventStatus(event_ret, CL_COMPLETE);
+ }
+ return CL_SUCCESS;
+ } else {
+ GBE_ASSERT(num_events > 0);
}
// Never come to here.
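
For context, below is a minimal caller-side sketch (not part of this commit) of the path the second hunk adds: a non-blocking clEnqueueMapBuffer with an empty wait list, which the driver now maps synchronously, setting the returned event to CL_COMPLETE on success or an error status on failure. The context/queue setup is assumed to already exist, and the function name map_example is illustrative only.

// Hypothetical usage sketch, not part of the patch. Assumes `ctx` and `queue`
// were created earlier; error handling is kept minimal.
#include <CL/cl.h>

static cl_int map_example(cl_context ctx, cl_command_queue queue)
{
  cl_int err = CL_SUCCESS;
  const size_t size = 4096;

  cl_mem buf = clCreateBuffer(ctx, CL_MEM_READ_WRITE, size, NULL, &err);
  if (err != CL_SUCCESS)
    return err;

  // Non-blocking map with no events to wait on: with this patch the driver
  // maps the buffer right away and marks the returned event CL_COMPLETE
  // (or an error status if the map fails).
  cl_event map_evt = NULL;
  void *ptr = clEnqueueMapBuffer(queue, buf, CL_FALSE, CL_MAP_WRITE,
                                 0, size, 0, NULL, &map_evt, &err);
  if (err != CL_SUCCESS) {
    clReleaseMemObject(buf);
    return err;
  }

  // The returned event behaves like any other enqueued-command event.
  clWaitForEvents(1, &map_evt);

  ((char *)ptr)[0] = 0x42; // touch the mapped region

  clEnqueueUnmapMemObject(queue, buf, ptr, 0, NULL, NULL);
  clFinish(queue);

  clReleaseEvent(map_evt);
  clReleaseMemObject(buf);
  return CL_SUCCESS;
}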