 drivers/gpu/drm/i915/i915_drv.h       |   1 +
 drivers/gpu/drm/i915/i915_gem.c       |  32 ++++
 drivers/gpu/drm/i915/i915_scheduler.c | 170 +++++++++++++++++++++++++++++++++-
 3 files changed, 202 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 238747ae5bec..c862245c769d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2385,6 +2385,7 @@ struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
+void i915_gem_request_dequeue(struct drm_i915_gem_request *req);
 void i915_gem_request_enable_interrupt(struct drm_i915_gem_request *req,
 				       bool fence_locked);
 void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d3c956e382b..909808fc2c74 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1267,6 +1267,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	might_sleep();
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
+	/* Lightweight check first of all */
 	if (i915_gem_request_completed(req))
 		return 0;
 
@@ -1475,6 +1476,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 */
 	request->ringbuf->last_retired_head = request->postfix;
 
+	/*
+	 * Must use list_del_init() not list_del() because some other
+	 * code tests (list_empty(&request->list)) to see whether the
+	 * request is (still) on the engine->request_list!
+	 */
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
 
@@ -1508,10 +1514,18 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 
 	lockdep_assert_held(&engine->dev->struct_mutex);
 
+	/*
+	 * If the request is not on any list, then presumably
+	 * it's already been retired?
+	 */
 	if (list_empty(&req->list))
 		return;
 
 	do {
+		/* Don't blindly assume that the request will be found! */
+		if (WARN_ON(list_empty(&engine->request_list)))
+			break;
+
 		tmp = list_first_entry(&engine->request_list,
 				       typeof(*tmp), list);
 
@@ -2745,6 +2759,24 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	intel_ring_reserved_space_end(ringbuf);
 }
 
+void i915_gem_request_dequeue(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The request has been de-queued from the hardware in some manner
+	 * (e.g. through pre-emption). So it needs to be removed from the
+	 * active request list (the request list doesn't contribute to
+	 * refcounting, so we don't also have to unreference it here).
+	 *
+	 * It also needs to have its seqno cleared as that will not be
+	 * valid any longer. However, the expectation is that the request
+	 * will be resubmitted later. At that time it will be assigned a
+	 * shiny new seqno.
+	 */
+	WARN_ON(i915_gem_request_completed(request));
+	list_del_init(&request->list);
+	request->seqno = 0;
+}
+
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
 				   const struct intel_context *ctx)
 {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index c7fb5ac5cee9..571cad18e3ec 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1306,6 +1306,169 @@ void i915_scheduler_reset_cleanup(struct intel_engine_cs *engine)
 	}
 }
 
+/*
+ * At this point, preemption has occurred.
+ *
+ * All the requests that had already completed before preemption will
+ * have been taken off the fence_signal_list, signalled, and put onto
+ * the fence_unsignal_list for cleanup. The preempting request itself
+ * should however still be on the fence_signal_list (and has not been
+ * signalled). There may also be additional requests on this list; they
+ * have been preempted.
+ *
+ * The request_list has not yet been processed, so it may still contain
+ * requests that have already completed. It should also contain the
+ * preempting request (not completed), and maybe additional requests;
+ * again, these have been preempted and need to be recycled through the
+ * scheduler.
+ */
+noinline
+static void
+i915_scheduler_preemption_postprocess(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct i915_scheduler *scheduler = dev_priv->scheduler;
+	struct i915_scheduler_queue_entry *pnode = NULL;
+	struct drm_i915_gem_request *preq = NULL;
+	struct i915_scheduler_stats *stats;
+	unsigned long flags;
+	int preempted = 0, preemptive = 0;
+
+	mutex_lock(&engine->dev->struct_mutex);
+
+	/*
+	 * FIXME: grab & empty fence_signal_list with spinlock,
+	 * then iterate after?
+	 */
+	spin_lock_irqsave(&engine->fence_lock, flags);
+	while (!list_empty(&engine->fence_signal_list)) {
+		struct i915_scheduler_queue_entry *node;
+		struct drm_i915_gem_request *req;
+
+		req = list_first_entry(&engine->fence_signal_list,
+				       struct drm_i915_gem_request,
+				       signal_link);
+		list_del_init(&req->signal_link);
+		spin_unlock_irqrestore(&engine->fence_lock, flags);
+
+		/* We should find only tracked unsignalled requests */
+		node = req->scheduler_qe;
+		WARN(!node || i915_gem_request_completed(req) ||
+		     (node->status == I915_SQS_PREEMPTED),
+		     "Invalid node state: %s [req = %d:%d]\n",
+		     node ? i915_scheduler_queue_status_str(node->status) : "NULL",
+		     req->uniq, req->seqno);
+
+		i915_gem_request_unreference(req);
+
+		spin_lock_irqsave(&engine->fence_lock, flags);
+	}
+	spin_unlock_irqrestore(&engine->fence_lock, flags);
+	/* Fence signal list must now be empty */
+
+	/*
+	 * The preemptive request and all other requests remaining on the
+	 * engine's work-in-progress list must be marked as preempted, so
+	 * the scheduler will reselect and resubmit them ...
+	 */
+	spin_lock_irqsave(&scheduler->lock, flags);
+
+	{
+		struct drm_i915_gem_request *req, *next;
+
+		list_for_each_entry_safe(req, next, &engine->request_list, list) {
+			struct i915_scheduler_queue_entry *node;
+
+			node = req->scheduler_qe;
+			if (WARN_ON(req->engine != engine))
+				continue;
+			if (i915_gem_request_completed(req))
+				continue;
+			/* Let's hope there aren't any untracked nodes here! */
+			if (WARN_ON(!node))
+				continue;
+
+			if (node->status == I915_SQS_PREEMPTED) {
+				/* Already processed in _notify() above */
+				preemptive += 1;
+				preq = req;
+				pnode = req->scheduler_qe;
+			} else if (!WARN_ON(!I915_SQS_IS_FLYING(node))) {
+				preempted += 1;
+				node->status = I915_SQS_PREEMPTED;
+				trace_i915_scheduler_unfly(engine, node);
+				trace_i915_scheduler_node_state_change(engine, node);
+				/* Empty the preempted ringbuffer */
+				intel_lr_context_resync(req->ctx, engine, false);
+			}
+
+			i915_gem_request_dequeue(req);
+		}
+	}
+
+	/* We should have found exactly ONE preemptive request */
+	WARN(preemptive != 1, "Got unexpected preemptive count II: %d!\n",
+	     preemptive);
+	stats = &scheduler->stats[engine->id];
+	stats->preempted += preempted;
+	if (stats->max_preempted < preempted)
+		stats->max_preempted = preempted;
+
+	{
+		/* XXX: Sky should be empty now */
+		struct i915_scheduler_queue_entry *node;
+
+		for_each_scheduler_node(node, engine->id)
+			WARN_ON(I915_SQS_IS_FLYING(node));
+	}
+
+	/* Anything else to do here ... ? */
+
+	/*
+	 * Postprocessing complete; the scheduler is now back in
+	 * normal non-preemptive state and can submit more requests
+	 */
+	scheduler->flags[engine->id] &= ~(I915_SF_PREEMPTING|I915_SF_PREEMPTED);
+
+	spin_unlock_irqrestore(&scheduler->lock, flags);
+
+	/* XXX: Should be nothing outstanding on request list */
+	{
+		struct drm_i915_gem_request *req;
+
+		list_for_each_entry(req, &engine->request_list, list)
+			WARN_ON(!i915_gem_request_completed(req));
+	}
+
+	/* Anything else to do here ... ? */
+	if (!WARN_ON(pnode == NULL || preq == NULL)) {
+		WARN_ON(pnode->params.request != preq);
+		WARN_ON(preq->scheduler_qe != pnode);
+		WARN_ON(preq->seqno);
+
+		/*
+		 * FIXME: assign a new reserved seqno here to ensure
+		 * we don't relaunch this request with the same seqno
+		 * FIXME: can we just clear it here instead?
+		 */
+		if (dev_priv->next_seqno == 0)
+			dev_priv->next_seqno = 1;
+		dev_priv->last_seqno = dev_priv->next_seqno++;
+		DRM_DEBUG_DRIVER("reassigning reserved seqno %08x->%08x, (seqno %08x, uniq %d)\n",
+				 preq->reserved_seqno, dev_priv->last_seqno,
+				 preq->seqno, preq->uniq);
+		preq->reserved_seqno = dev_priv->last_seqno;
+
+		/* FIXME: don't sleep, don't empty context? */
+		msleep(1);
+		/* Empty the preempted ringbuffer */
+		intel_lr_context_resync(preq->ctx, engine, false);
+	}
+
+	mutex_unlock(&engine->dev->struct_mutex);
+}
+
+noinline
 static bool i915_scheduler_remove(struct i915_scheduler *scheduler,
 				  struct intel_engine_cs *engine,
 				  struct list_head *remove)
@@ -1418,12 +1581,17 @@ void i915_scheduler_work_handler(struct work_struct *work)
 {
 	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
+	struct i915_scheduler *scheduler;
 	int i;
 
 	dev_priv = container_of(work, struct drm_i915_private, mm.scheduler_work);
+	scheduler = dev_priv->scheduler;
 
-	for_each_engine(engine, dev_priv, i)
+	for_each_engine(engine, dev_priv, i) {
+		if (scheduler->flags[engine->id] & I915_SF_PREEMPTED)
+			i915_scheduler_preemption_postprocess(engine);
 		i915_scheduler_process_work(engine);
+	}
 }
 
 /**
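
A note on the list_del_init() comment in i915_gem_request_retire() above. The
following is a minimal, self-contained userspace sketch (an illustration only,
not driver code; the real helpers live in <linux/list.h>) of why the retire
and dequeue paths must use list_del_init() rather than list_del():
list_del_init() relinks the node to itself, so a later list_empty(&req->list)
test, as used by __i915_gem_request_retire__upto() and relied on by
i915_gem_request_dequeue(), reliably reads as "not on the request_list".
Plain list_del() would leave the node's pointers stale and make that test
meaningless.

#include <stdio.h>

/* Cut-down clone of the kernel's circular doubly-linked list */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;	/* self-linked == empty */
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* re-initialise so list_empty(n) is true */
}

int main(void)
{
	struct list_head request_list, req;

	INIT_LIST_HEAD(&request_list);
	INIT_LIST_HEAD(&req);

	list_add_tail(&req, &request_list);	/* request queued */
	printf("queued:   list_empty(&req) = %d\n", list_empty(&req));

	list_del_init(&req);			/* retired or dequeued */
	printf("dequeued: list_empty(&req) = %d\n", list_empty(&req));
	return 0;
}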
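Similarly, a hypothetical userspace sketch of the seqno reassignment at the
end of i915_scheduler_preemption_postprocess(): the driver treats seqno == 0
as "no seqno assigned" (i915_gem_request_dequeue() clears the field to 0), so
when the 32-bit counter wraps, the allocator must never hand out 0 as a real
seqno. This mirrors the next_seqno/last_seqno handling in the patch; the
assign_seqno() name is invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno;	/* stands in for dev_priv->next_seqno */

static uint32_t assign_seqno(void)
{
	if (next_seqno == 0)	/* counter wrapped: 0 is reserved */
		next_seqno = 1;
	return next_seqno++;	/* dev_priv->last_seqno = dev_priv->next_seqno++ */
}

int main(void)
{
	next_seqno = 0xffffffffu;	/* force a wrap on the second call */
	printf("%08x\n", assign_seqno());	/* ffffffff */
	printf("%08x\n", assign_seqno());	/* 00000001, never 00000000 */
	assert(assign_seqno() != 0);
	return 0;
}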