commit     cd617bfd78f05b082f63a4e20b2183c352b1dd26 (patch)
author     Ben Widawsky <ben@bwidawsk.net>   2011-10-24 11:01:03 -0700
committer  Ben Widawsky <ben@bwidawsk.net>   2011-10-24 11:01:03 -0700
tree       924ef872f6ff046f66e7b52a70df74e31835b554
parent     238252bfbcc3e3844f5d74babc28b97cfed43b86 (diff)
review for danvet (branch: scheduler_blocking)
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  | 46
3 files changed, 78 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 15c0ca58ad8b..de6b0c2c5a5f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1189,6 +1189,8 @@ int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
 				   uint32_t seqno);
+int __must_check i915_gem_wait_unlocked(struct intel_ring_buffer *ring,
+					uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f0f885f44b87..07e1be746fc7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2031,6 +2031,36 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	return ret;
 }
 
+int
+i915_gem_wait_unlocked(struct intel_ring_buffer *ring,
+		       uint32_t seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	int ret;
+
+	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+		return 0;
+
+	do {
+		/* yes it's racy, so we wake up pretty often to check... not
+		 * power friendly. */
+		ret = wait_event_interruptible_timeout(ring->irq_queue,
+			i915_seqno_passed(ring->get_seqno(ring), seqno) ||
+			dev_priv->mm.suspended ||
+			atomic_read(&dev_priv->mm.wedged),
+			msecs_to_jiffies(1));
+
+		if (atomic_read(&dev_priv->mm.wedged))
+			BUG_ON(ret == 0);
+
+		if (dev_priv->mm.suspended)
+			return -EBUSY;
+
+	} while (ret == 0);
+
+	return 0;
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7b23d94e8d56..6d780c5a1417 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -43,6 +43,8 @@ struct change_domains {
 struct eb_objects {
 	struct drm_i915_gem_object *batch_obj;
 	struct list_head objects;
+	uint32_t seqno;
+	struct intel_ring_buffer *wait_ring;
 	int buffer_count;
 	int mode;
 	int and;
@@ -276,6 +278,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 	return NULL;
 }
 
+static uint32_t
+eb_would_block(struct eb_objects *eb)
+{
+	struct drm_i915_gem_object *obj;
+	uint32_t seqno, last_seqno = 0;
+	/* Let's see if this thing would have to block */
+	list_for_each_entry(obj, &eb->objects, exec_list) {
+		/* It's on an active list, and not our ring */
+		seqno = obj->last_rendering_seqno;
+		if (seqno == 0)
+			continue;
+
+		if (seqno < obj->ring->outstanding_lazy_request)
+			continue;
+
+		if (obj->ring == eb->batch_obj->ring)
+			continue;
+
+		if (last_seqno < seqno) {
+			eb->wait_ring = obj->ring;
+			last_seqno = seqno;
+		}
+	}
+
+	return last_seqno;
+}
+
 static void
 eb_destroy(struct eb_objects *eb)
 {
@@ -1037,6 +1066,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj = NULL;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
+	bool rt_prio = false;
 	u32 exec_start, exec_len;
 	u32 seqno;
 	int ret, i;
@@ -1119,6 +1149,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+	rt_prio = (args->flags & I915_EXEC_PRIORITY_CLASS_MASK) ==
+		  I915_EXEC_PRIO_CLASS_REALTIME;
+
+again:
 	for (i = 0; i < args->buffer_count; i++) {
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
@@ -1145,6 +1179,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	/* The last object is the batch object */
 	eb->batch_obj = obj;
 
+	if (eb_would_block(eb) && !rt_prio) {
+		/* Drop the lock and wait for the seqno */
+		eb_reset(eb);
+		mutex_unlock(&dev->struct_mutex);
+		ret = i915_gem_wait_unlocked(ring, eb->seqno);
+		if (ret || i915_mutex_lock_interruptible(dev)) {
+			eb_destroy(eb);
+			goto pre_mutex_err;
+		}
+		goto again;
+	}
+
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects);
 	if (ret)
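
The core of the change is a drop-the-lock-and-retry shape: eb_would_block() peeks at the objects' last_rendering_seqno values to predict a cross-ring stall, and i915_gem_wait_unlocked() then polls for the seqno without holding struct_mutex, so other clients can keep submitting in the meantime. As a reference, here is a minimal, self-contained sketch of that pattern; every my_* name is hypothetical, and only the kernel primitives (mutex_lock_interruptible(), wait_event_interruptible_timeout(), msecs_to_jiffies()) are taken as given.

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct my_dev {
	struct mutex lock;		/* plays the role of dev->struct_mutex */
	wait_queue_head_t wq;		/* plays the role of ring->irq_queue */
};

static bool my_cond_met(struct my_dev *d);	/* e.g. "seqno has passed" */
static bool my_would_block(struct my_dev *d);	/* e.g. eb_would_block() */
static int my_do_work(struct my_dev *d);	/* e.g. reserve + submit */

static int my_submit(struct my_dev *d)
{
	int ret;

	ret = mutex_lock_interruptible(&d->lock);
	if (ret)
		return ret;
again:
	if (my_would_block(d)) {
		/* Drop the lock so other submitters can make progress,
		 * then poll the condition with a short timeout: racy but
		 * simple, the same shape as i915_gem_wait_unlocked(). */
		mutex_unlock(&d->lock);
		do {
			ret = wait_event_interruptible_timeout(d->wq,
					my_cond_met(d),
					msecs_to_jiffies(1));
			if (ret < 0)
				return ret;	/* signal: -ERESTARTSYS */
		} while (ret == 0);		/* 0 == timed out; recheck */

		ret = mutex_lock_interruptible(&d->lock);
		if (ret)
			return ret;
		/* The world may have changed while unlocked: re-evaluate
		 * rather than trusting the earlier would-block answer. */
		goto again;
	}

	ret = my_do_work(d);
	mutex_unlock(&d->lock);
	return ret;
}

As the patch's own comment concedes, the 1 ms polling timeout is racy and not power friendly; it trades periodic wakeups for not needing a guaranteed interrupt-driven wakeup while the lock is dropped. Note also that realtime-class submitters (rt_prio) skip the wait entirely and proceed straight to reservation, which is the scheduling point of the change.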