author     Jani Nikula <jani.nikula@intel.com>  2017-06-16 10:03:00 +0300
committer  Jani Nikula <jani.nikula@intel.com>  2017-06-16 10:03:01 +0300
commit     7dfb9ba33f6f3dee710f47cddf0a098d20afbc25 (patch)
tree       3d11a018882c04091aa7ba013276c05ce8cce094 /drivers/gpu/drm/i915/gvt/sched_policy.c
parent     9a30a26122c3fb249a4f509fb2253de259e58cd0 (diff)
parent     615c16a9d8649b9894592d11bc393e684b11e2ea (diff)
Merge tag 'gvt-next-2017-06-08' of https://github.com/01org/gvt-linux into drm-intel-next-queued
gvt-next-2017-06-08

First gvt-next pull for 4.13:

- optimization for per-VM mmio save/restore (Changbin)
- optimization for mmio hash table (Changbin)
- scheduler optimization with event (Ping)
- vGPU reset refinement (Fred)
- other misc refactor and cleanups, etc.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170608093547.bjgs436e3iokrzdm@zhen-hp.sh.intel.com
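The "scheduler optimization with event (Ping)" item corresponds to the sched_policy.c hunks below: the periodic timeslice balancing is moved out of tbs_sched_func() and is now gated on a service-request bit that intel_gvt_schedule() consumes. The standalone C sketch that follows only illustrates that test-and-clear gating pattern; REQUEST_SCHED, TS_BALANCE_PERIOD_MS, schedule_tick() and the userspace scaffolding are illustrative stand-ins, not the i915/GVT code itself.

/* Minimal userspace sketch (not the kernel code) of the event-gated
 * balancing pattern this pull introduces: rebalance only when a SCHED
 * request bit was set, and only once per balance period.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQUEST_SCHED        (1u << 0)   /* stand-in for INTEL_GVT_REQUEST_SCHED */
#define TS_BALANCE_PERIOD_MS 100u        /* stand-in for GVT_TS_BALANCE_PERIOD_MS */

static atomic_uint service_request;

/* Atomically test-and-clear one request bit, like test_and_clear_bit(). */
static bool test_and_clear_request(unsigned int bit)
{
	unsigned int old = atomic_fetch_and(&service_request, ~bit);
	return (old & bit) != 0;
}

static void balance_timeslice(void)
{
	puts("rebalancing vGPU timeslices");
}

/* Called once per scheduler tick; balances only on request + period. */
static void schedule_tick(void)
{
	static uint64_t timer_check;

	if (test_and_clear_request(REQUEST_SCHED)) {
		if (!(timer_check++ % TS_BALANCE_PERIOD_MS))
			balance_timeslice();
	}
	/* ... pick the next vGPU here (tbs_sched_func() in the real code) ... */
}

int main(void)
{
	atomic_fetch_or(&service_request, REQUEST_SCHED); /* emulate a timer event */
	schedule_tick();  /* rebalances: request set, counter hits the period */
	schedule_tick();  /* no-op: the request bit was already consumed */
	return 0;
}

Built with any C11 compiler, the first tick rebalances and the second does nothing, mirroring how the scheduler now only does balancing work when a scheduling event actually arrived rather than on every invocation.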
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/sched_policy.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index f25ff133865f..436377da41ba 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -202,11 +202,6 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
- static uint64_t timer_check;
-
- if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
- gvt_balance_timeslice(sched_data);
-
/* no active vgpu or has already had a target */
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
goto out;
@@ -231,9 +226,19 @@ out:
void intel_gvt_schedule(struct intel_gvt *gvt)
{
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+ static uint64_t timer_check;
mutex_lock(&gvt->lock);
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request)) {
+ if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+ gvt_balance_timeslice(sched_data);
+ }
+ clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
+
tbs_sched_func(sched_data);
+
mutex_unlock(&gvt->lock);
}
@@ -303,8 +308,20 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
+ struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
+ int ring_id;
+
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
+
+ spin_lock_bh(&scheduler->mmio_context_lock);
+ for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+ if (scheduler->engine_owner[ring_id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+ scheduler->engine_owner[ring_id] = NULL;
+ }
+ }
+ spin_unlock_bh(&scheduler->mmio_context_lock);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
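The tbs_sched_clean_vgpu() hunk above also releases any engine MMIO context the departing vGPU still owned, switching it back before the owner pointer is cleared. Below is a small, hedged userspace sketch of that ownership-release pattern; the scheduler struct, pthread mutex, NUM_ENGINES value and switch_mmio_to_host() helper are simplified stand-ins for the real i915/GVT types and intel_gvt_switch_mmio().

/* Illustrative sketch of the ownership-release loop: on vGPU teardown,
 * every engine still owned by that vGPU has its MMIO context switched
 * away and its owner cleared, under the MMIO-context lock.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_ENGINES 5   /* stand-in for I915_NUM_ENGINES */

struct vgpu { int id; };

struct scheduler {
	pthread_mutex_t mmio_context_lock;
	struct vgpu *engine_owner[NUM_ENGINES];
};

/* Stand-in for intel_gvt_switch_mmio(vgpu, NULL, ring_id). */
static void switch_mmio_to_host(struct vgpu *prev, int ring_id)
{
	printf("engine %d: switching MMIO context away from vGPU %d\n",
	       ring_id, prev->id);
}

static void clean_vgpu(struct scheduler *sched, struct vgpu *vgpu)
{
	pthread_mutex_lock(&sched->mmio_context_lock);
	for (int ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
		if (sched->engine_owner[ring_id] == vgpu) {
			switch_mmio_to_host(vgpu, ring_id);
			sched->engine_owner[ring_id] = NULL;
		}
	}
	pthread_mutex_unlock(&sched->mmio_context_lock);
}

int main(void)
{
	struct vgpu v = { .id = 1 };
	struct scheduler sched = { .mmio_context_lock = PTHREAD_MUTEX_INITIALIZER };

	sched.engine_owner[0] = &v;   /* vGPU 1 currently owns engine 0 */
	clean_vgpu(&sched, &v);       /* released on teardown */
	return 0;
}

Holding the lock across the whole scan mirrors the spin_lock_bh() critical section in the patch, so a concurrent context switch cannot observe a half-cleared owner.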