diff options
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 49 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_scheduler.c | 26 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_scheduler.h | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 8 |
6 files changed, 81 insertions, 15 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ed9d82946c54..6fa2541e5d09 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3159,9 +3159,14 @@ void __i915_add_request(struct drm_i915_gem_request *req, __i915_add_request(req, NULL, false) int __i915_wait_request(struct drm_i915_gem_request *req, unsigned reset_counter, - bool interruptible, + uint32_t flags, s64 *timeout, struct intel_rps_client *rps); + +/* flags used by users of __i915_wait_request */ +#define I915_WAIT_REQUEST_INTERRUPTIBLE (1 << 0) +#define I915_WAIT_REQUEST_LOCKED (1 << 1) + int __must_check i915_wait_request(struct drm_i915_gem_request *req); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int __must_check diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ec6888491db2..695070d842ba 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1234,7 +1234,9 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * __i915_wait_request - wait until execution of request has finished * @req: duh! 
* @reset_counter: reset sequence associated with the given request - * @interruptible: do an interruptible wait (normally yes) + * @flags: flags to define the nature of wait + * I915_WAIT_REQUEST_INTERRUPTIBLE - do an interruptible wait (normally yes) + * I915_WAIT_REQUEST_LOCKED - caller is holding struct_mutex + * @timeout: in - how long to wait (NULL forever); out - how much time remaining * * Note: It is of utmost importance that the passed in seqno and reset_counter @@ -1249,20 +1251,22 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) */ int __i915_wait_request(struct drm_i915_gem_request *req, unsigned reset_counter, - bool interruptible, + uint32_t flags, s64 *timeout, struct intel_rps_client *rps) { struct intel_engine_cs *engine = i915_gem_request_get_engine(req); struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; + bool interruptible = flags & I915_WAIT_REQUEST_INTERRUPTIBLE; int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; uint32_t seqno; DEFINE_WAIT(wait); unsigned long timeout_expire; s64 before = 0; /* Only to silence a compiler warning. */ - int ret; + int ret = 0; + might_sleep(); WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); if (i915_gem_request_completed(req)) @@ -1317,6 +1321,19 @@ int __i915_wait_request(struct drm_i915_gem_request *req, break; } + if (flags & I915_WAIT_REQUEST_LOCKED) { + /* + * If this request is being processed by the scheduler + * then it is unsafe to sleep with the mutex lock held + * as the scheduler may require the lock in order to + * progress the request. 
+ */ + if (i915_scheduler_is_mutex_required(req)) { + ret = -EAGAIN; + break; + } + } + if (i915_gem_request_completed(req)) { ret = 0; break; @@ -1521,6 +1538,7 @@ i915_wait_request(struct drm_i915_gem_request *req) struct drm_i915_private *dev_priv; bool interruptible; int ret; + uint32_t flags; BUG_ON(req == NULL); @@ -1534,9 +1552,13 @@ i915_wait_request(struct drm_i915_gem_request *req) if (ret) return ret; + flags = I915_WAIT_REQUEST_LOCKED; + if (interruptible) + flags |= I915_WAIT_REQUEST_INTERRUPTIBLE; + ret = __i915_wait_request(req, atomic_read(&dev_priv->gpu_error.reset_counter), - interruptible, NULL, NULL); + flags, NULL, NULL); if (ret) return ret; @@ -1648,7 +1670,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, mutex_unlock(&dev->struct_mutex); for (i = 0; ret == 0 && i < n; i++) - ret = __i915_wait_request(requests[i], reset_counter, true, + ret = __i915_wait_request(requests[i], reset_counter, + I915_WAIT_REQUEST_INTERRUPTIBLE, NULL, rps); mutex_lock(&dev->struct_mutex); @@ -3589,7 +3612,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) for (i = 0; i < n; i++) { if (ret == 0) - ret = __i915_wait_request(req[i], reset_counter, true, + ret = __i915_wait_request(req[i], reset_counter, + I915_WAIT_REQUEST_INTERRUPTIBLE, args->timeout_ns > 0 ? 
&args->timeout_ns : NULL, to_rps_client(file)); i915_gem_request_unreference(req[i]); @@ -3620,11 +3644,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (!i915_semaphore_is_enabled(obj->base.dev)) { struct drm_i915_private *i915 = to_i915(obj->base.dev); + uint32_t flags; + + flags = I915_WAIT_REQUEST_LOCKED; + if (i915->mm.interruptible) + flags |= I915_WAIT_REQUEST_INTERRUPTIBLE; + ret = __i915_wait_request(from_req, atomic_read(&i915->gpu_error.reset_counter), - i915->mm.interruptible, - NULL, - &i915->rps.semaphores); + flags, NULL, &i915->rps.semaphores); if (ret) return ret; @@ -4610,7 +4638,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) if (target == NULL) return 0; - ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); + ret = __i915_wait_request(target, reset_counter, + I915_WAIT_REQUEST_INTERRUPTIBLE, NULL, NULL); if (ret == 0) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index cdb86da922e2..cf8b50aba9bd 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -910,6 +910,32 @@ void i915_scheduler_work_handler(struct work_struct *work) } /** + * i915_scheduler_is_mutex_required - query if it is safe to hold the mutex + * lock while waiting for the given request. + * @req: request to be queried + * + * Looks up the given request in the scheduler's internal queue and reports + * on whether the scheduler will need to acquire the driver's mutex lock in + * order for that request to complete. 
+ */ +bool i915_scheduler_is_mutex_required(struct drm_i915_gem_request *req) +{ + struct drm_i915_private *dev_priv = to_i915(req->engine->dev); + struct i915_scheduler *scheduler = dev_priv->scheduler; + + if (!scheduler) + return false; + + if (req->scheduler_qe == NULL) + return false; + + if (I915_SQS_IS_QUEUED(req->scheduler_qe)) + return true; + + return false; +} + +/** * i915_scheduler_closefile - notify the scheduler that a DRM file handle * has been closed. * @dev: DRM device diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index b8d4a343ee11..38e860dfde77 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -111,5 +111,6 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe); bool i915_scheduler_notify_request(struct drm_i915_gem_request *req); void i915_scheduler_wakeup(struct drm_device *dev); void i915_scheduler_work_handler(struct work_struct *work); +bool i915_scheduler_is_mutex_required(struct drm_i915_gem_request *req); #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 531553de0453..54274d3ec601 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11368,7 +11368,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work) if (mmio_flip->req) { WARN_ON(__i915_wait_request(mmio_flip->req, mmio_flip->crtc->reset_counter, - false, NULL, + 0, NULL, &mmio_flip->i915->rps.mmioflips)); i915_gem_request_unreference(mmio_flip->req); } @@ -13436,7 +13436,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, continue; ret = __i915_wait_request(intel_plane_state->wait_req, - reset_counter, true, + reset_counter, + I915_WAIT_REQUEST_INTERRUPTIBLE, NULL, NULL); /* Swallow -EIO errors to allow updates during hw lockup. 
*/ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e3f223720f8d..6021655bf519 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2362,6 +2362,7 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) int intel_engine_idle(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req; + uint32_t flags; /* Wait upon the last request to be completed */ if (list_empty(&engine->request_list)) @@ -2371,11 +2372,14 @@ int intel_engine_idle(struct intel_engine_cs *engine) struct drm_i915_gem_request, list); + flags = I915_WAIT_REQUEST_LOCKED; + if (to_i915(engine->dev)->mm.interruptible) + flags |= I915_WAIT_REQUEST_INTERRUPTIBLE; + /* Make sure we do not trigger any retires */ return __i915_wait_request(req, atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter), - to_i915(engine->dev)->mm.interruptible, - NULL, NULL); + flags, NULL, NULL); } int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) |