| author | Ben Widawsky <ben@bwidawsk.net> | 2011-08-21 12:59:14 -0700 |
|---|---|---|
| committer | Ben Widawsky <ben@bwidawsk.net> | 2011-09-12 15:10:54 -0700 |
| commit | aa681eb4849da945c4e8b94ba1531efdb75205e3 (patch) | |
| tree | df0b51b6d41759a0779504d429022c97e10b43a3 | |
| parent | 5d7f9816248979ad33e1e7fe9d15670461139c70 (diff) | |
tempfor-bernard
-rw-r--r-- | src/mesa/drivers/dri/intel/intel_buffer_objects.c | 70
1 file changed, 66 insertions, 4 deletions
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
index caf859016d..9a9ff83976 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -300,6 +300,64 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
 
 /**
  * Called via glMapBufferRange and glMapBuffer
+ */
+static void *
+intel_bufferobj_map(struct gl_context * ctx,
+                    GLenum target,
+                    GLenum access, struct gl_buffer_object *obj)
+{
+   struct intel_context *intel = intel_context(ctx);
+   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+   GLboolean read_only = (access == GL_READ_ONLY_ARB);
+   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);
+
+   assert(intel_obj);
+
+   if (intel_obj->sys_buffer) {
+      if (!read_only && intel_obj->source) {
+         release_buffer(intel_obj);
+      }
+
+      if (!intel_obj->buffer || intel_obj->source) {
+         obj->Pointer = intel_obj->sys_buffer;
+         obj->Length = obj->Size;
+         obj->Offset = 0;
+         return obj->Pointer;
+      }
+
+      free(intel_obj->sys_buffer);
+      intel_obj->sys_buffer = NULL;
+   }
+
+   /* Flush any existing batchbuffer that might reference this data. */
+   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
+      intel_flush(ctx);
+
+   if (intel_obj->region)
+      intel_bufferobj_cow(intel, intel_obj);
+
+   if (intel_obj->buffer == NULL) {
+      obj->Pointer = NULL;
+      return NULL;
+   }
+
+   if (write_only) {
+      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+      intel_obj->mapped_gtt = GL_TRUE;
+   } else {
+      drm_intel_bo_map(intel_obj->buffer, !read_only);
+      intel_obj->mapped_gtt = GL_FALSE;
+   }
+
+   obj->Pointer = intel_obj->buffer->virtual;
+   obj->Length = obj->Size;
+   obj->Offset = 0;
+
+   return obj->Pointer;
+}
+
+/**
+ * Called via glMapBufferRange().
  *
  * The goal of this extension is to allow apps to accumulate their rendering
  * at the same time as they accumulate their buffer object.  Without it,
@@ -388,7 +446,11 @@ intel_bufferobj_map_range(struct gl_context * ctx,
                                                    "range map",
                                                    length, 64);
       if (!(access & GL_MAP_READ_BIT)) {
-         drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+         if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
+            abort();
+            drm_intel_gem_bo_map_gtt_wo(intel_obj->range_map_bo, 0, intel_obj->buffer->size/4096);
+         } else
+            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
          intel_obj->mapped_gtt = GL_TRUE;
       } else {
         drm_intel_bo_map(intel_obj->range_map_bo,
@@ -401,9 +463,9 @@ intel_bufferobj_map_range(struct gl_context * ctx,
    }
 
    if (!(access & GL_MAP_READ_BIT)) {
-      if (access & GL_MAP_UNSYNCHRONIZED_BIT)
-         drm_intel_gem_bo_map_gtt_wo(intel_obj->buffer, 0, 0);
-      else
+      if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
+         drm_intel_gem_bo_map_gtt_wo(intel_obj->buffer, 0, intel_obj->buffer->size / 4096);
+      } else
          drm_intel_gem_bo_map_gtt(intel_obj->buffer);
       intel_obj->mapped_gtt = GL_TRUE;
    } else {
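For context, here is a hypothetical caller-side sketch (not part of the patch) of the GL usage these paths serve. Per the comment on the added intel_bufferobj_map(), a write-only glMapBuffer() would be mapped through the GTT, while a glMapBufferRange() call with GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT is the combination that intel_bufferobj_map_range() now routes to drm_intel_gem_bo_map_gtt_wo() for GEM-backed buffers. The helper names, buffer target, and sizes below are invented for illustration, and a current OpenGL context exposing GL_ARB_map_buffer_range is assumed.

```c
/*
 * Hypothetical caller-side example, not part of the patch.  Assumes a
 * current OpenGL context; buffer target, sizes, and data are invented.
 */
#define GL_GLEXT_PROTOTYPES 1
#include <string.h>
#include <GL/gl.h>
#include <GL/glext.h>

/* Write-only map of the whole buffer: per its doc comment, the added
 * intel_bufferobj_map() would service this and take the
 * drm_intel_gem_bo_map_gtt() branch. */
static void
overwrite_whole_buffer(GLuint vbo, const void *data, GLsizeiptr size)
{
   void *ptr;

   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   ptr = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
   if (ptr) {
      memcpy(ptr, data, size);
      glUnmapBuffer(GL_ARRAY_BUFFER);
   }
}

/* Write-only, unsynchronized range map: for a GEM-backed buffer this is
 * the flag combination intel_bufferobj_map_range() now hands to
 * drm_intel_gem_bo_map_gtt_wo(). */
static void
append_without_stalling(GLuint vbo, GLintptr offset,
                        const void *data, GLsizeiptr size)
{
   void *ptr;

   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   ptr = glMapBufferRange(GL_ARRAY_BUFFER, offset, size,
                          GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
   if (ptr) {
      memcpy(ptr, data, size);
      glUnmapBuffer(GL_ARRAY_BUFFER);
   }
}
```

The point of the unsynchronized, write-only case is that the driver neither waits for the GPU to finish with the buffer nor pulls its contents into the CPU cache; the application takes on the responsibility of not overwriting data the GPU may still be reading.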