author    Ben Widawsky <ben@bwidawsk.net>    2013-06-26 16:43:41 -0700
committer Ben Widawsky <benjamin.widawsky@intel.com>    2013-12-06 10:55:34 -0800
commit    f89c2cddd9e4c3b767fa0af19db6ac5f1b3d93d4 (patch)
tree      476ac2d74539d6bd462b6cafe89406f786f832e9
parent    bd5e6dd1ad7c2c6a78cffbe181735105e0fbbe54 (diff)
drm/i915: Defer request freeing
With context destruction, we always want to be able to tear down the
underlying address space. This is invoked on the last unreference to the
context, which could happen before we've moved all objects to the
inactive list. To enable a clean teardown of the address space, make sure
to process the request free last. Without this change, we cannot
guarantee that we don't still have active objects in the VM.

As an example of a failing case:

CTX-A is created, count=1
CTX-A is used during execbuf
	does a context switch, count = 2
	and add_request, count = 3
CTX-B runs, switches, CTX-A count = 2
CTX-A is destroyed, count = 1
retire requests is called
	free_request from CTX-A, count = 0 <--- free context with active object

As mentioned above, by doing the free request after processing the
active list, we can avoid this case.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 13
1 file changed, 10 insertions(+), 3 deletions(-)
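The patch below applies a "retire now, free later" ordering: completed
requests are parked on a local list and only freed once the active list
has been walked, so the final context unreference can no longer happen
while objects are still active. As a rough illustration of that ordering
only, here is a minimal userspace C sketch; the struct and function names
are hypothetical stand-ins, not the i915 ones.

/*
 * Toy sketch of the deferred-free pattern (hypothetical types, not the
 * real i915 structures).  Completed requests are moved to a private list
 * first and freed only after the active list has been processed.
 */
#include <stdio.h>
#include <stdlib.h>

struct request {
	int seqno;
	struct request *next;
};

/* Move every completed request from *pending onto *deferred instead of
 * freeing it immediately (LIFO order is fine for the sketch; the real
 * code preserves order with list_move_tail()). */
static void retire_requests(struct request **pending,
			    struct request **deferred,
			    int last_completed_seqno)
{
	while (*pending && (*pending)->seqno <= last_completed_seqno) {
		struct request *req = *pending;

		*pending = req->next;
		req->next = *deferred;	/* defer: park on the local list */
		*deferred = req;
	}
}

/* Stand-in for moving no-longer-referenced buffers to the inactive list;
 * in i915 this is the step that must finish before the context can die. */
static void process_active_list(void)
{
	printf("active list processed\n");
}

int main(void)
{
	struct request *pending = NULL, *deferred = NULL;
	int i;

	/* Queue three fake requests; seqno 1 ends up at the head. */
	for (i = 3; i >= 1; i--) {
		struct request *req = malloc(sizeof(*req));

		req->seqno = i;
		req->next = pending;
		pending = req;
	}

	retire_requests(&pending, &deferred, 2);  /* seqnos 1 and 2 are done */
	process_active_list();                    /* must happen first */

	/* Only now are the completed requests actually freed, mirroring
	 * the deferred_request_free loop in the patch. */
	while (deferred) {
		struct request *req = deferred;

		deferred = req->next;
		printf("freeing request %d\n", req->seqno);
		free(req);
	}

	/* Clean up the still-pending request in this toy example. */
	while (pending) {
		struct request *req = pending;

		pending = req->next;
		free(req);
	}
	return 0;
}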
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ed5210897d61..e6d7b4c3ab5b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2423,6 +2423,8 @@ void i915_gem_reset(struct drm_device *dev)
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
+ LIST_HEAD(deferred_request_free);
+ struct drm_i915_gem_request *request;
uint32_t seqno;
if (list_empty(&ring->request_list))
@@ -2433,8 +2435,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
while (!list_empty(&ring->request_list)) {
- struct drm_i915_gem_request *request;
-
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
@@ -2450,7 +2450,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
*/
ring->last_retired_head = request->tail;
- i915_gem_free_request(request);
+ list_move_tail(&request->list, &deferred_request_free);
}
/* Move any buffers on the active list that are no longer referenced
@@ -2475,6 +2475,13 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
ring->trace_irq_seqno = 0;
}
+ /* Finish processing active list before freeing request */
+ while (!list_empty(&deferred_request_free)) {
+ request = list_first_entry(&deferred_request_free,
+ struct drm_i915_gem_request,
+ list);
+ i915_gem_free_request(request);
+ }
WARN_ON(i915_verify_lists(ring->dev));
}