author     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>   2019-05-21 14:37:37 +0300
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>   2019-05-21 14:37:38 +0300
commit     57cb853d1d5b07ed4e4647ad61b0c16a9c21996e (patch)
tree       797f837a903efdf07d5295e13018502f58c12c50
parent     a491cc8e1597ea25803191cded49d3686702a406 (diff)
parent     591c39ffac4ab1ddf2ea6d49331cb614e0682b28 (diff)
Merge tag 'gvt-fixes-2019-05-21' of https://github.com/intel/gvt-linux into drm-intel-fixes
gvt-fixes-2019-05-21
- vGPU reset fix with sane init breadcrumb (Weinan)
- Fix TRTT handling to use context state (Yan)
- Fix one error return (Dan)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521062408.GH12913@zhen-hp.sh.intel.com
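
The first fix listed above amounts to emitting an engine's init breadcrumb for every GVT request before its ring contents are copied, so that reset handling can tell from the timeline's HWSP whether a request has actually started on hardware (see the scheduler.c hunk in the diff below). What follows is a minimal standalone sketch of that guard pattern only; the struct definitions are simplified stand-ins for the real i915 engine and request types, not the driver's actual layout.

#include <stdio.h>

struct request;

/* Simplified stand-in for an engine; the init-breadcrumb hook is optional. */
struct engine {
	int (*emit_init_breadcrumb)(struct request *rq);	/* may be NULL */
};

/* Simplified stand-in for an i915 request bound to one engine. */
struct request {
	struct engine *engine;
};

/*
 * Emit the init breadcrumb when the engine provides the hook and propagate
 * any error to the caller -- the same shape as the guard added to
 * copy_workload_to_ring_buffer() in the scheduler.c hunk of the diff below.
 */
static int emit_init_breadcrumb_if_needed(struct request *rq)
{
	int err;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err) {
			fprintf(stderr, "fail to emit init breadcrumb\n");
			return err;
		}
	}
	return 0;
}

/* Dummy hook standing in for the real per-engine callback. */
static int dummy_breadcrumb(struct request *rq)
{
	(void)rq;
	return 0;
}

int main(void)
{
	struct engine eng = { .emit_init_breadcrumb = dummy_breadcrumb };
	struct request rq = { .engine = &eng };

	return emit_init_breadcrumb_if_needed(&rq);
}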
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c    | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c           |  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  | 23
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     | 23
5 files changed, 42 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index ab002cfd3cab..5cb59c0b4bbe 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -896,12 +896,16 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	}
 
 	/* TODO
-	 * Right now only scan LRI command on KBL and in inhibit context.
-	 * It's good enough to support initializing mmio by lri command in
-	 * vgpu inhibit context on KBL.
+	 * In order to let workload with inhibit context to generate
+	 * correct image data into memory, vregs values will be loaded to
+	 * hw via LRIs in the workload with inhibit context. But as
+	 * indirect context is loaded prior to LRIs in workload, we don't
+	 * want reg values specified in indirect context overwritten by
+	 * LRIs in workloads. So, when scanning an indirect context, we
+	 * update reg values in it into vregs, so LRIs in workload with
+	 * inhibit context will restore with correct values
 	 */
-	if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
-		|| IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
+	if (IS_GEN(gvt->dev_priv, 9) &&
 	    intel_gvt_mmio_is_in_ctx(gvt, offset) &&
 	    !strncmp(cmd, "lri", 3)) {
 		intel_gvt_hypervisor_read_gpa(s->vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 08c74e65836b..244ad1729764 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1076,8 +1076,10 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 	} else {
 		int type = get_next_pt_type(we->type);
 
-		if (!gtt_type_is_pt(type))
+		if (!gtt_type_is_pt(type)) {
+			ret = -EINVAL;
 			goto err;
+		}
 
 		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90673fca792f..e09bd6e0cc4d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1364,7 +1364,6 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u32 trtte = *(u32 *)p_data;
 
 	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
@@ -1373,11 +1372,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 		return -EINVAL;
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
-	/* TRTTE is not per-context */
-
-	mmio_hw_access_pre(dev_priv);
-	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
-	mmio_hw_access_post(dev_priv);
 
 	return 0;
 }
@@ -1385,15 +1379,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 val = *(u32 *)p_data;
-
-	if (val & 1) {
-		/* unblock hw logic */
-		mmio_hw_access_pre(dev_priv);
-		I915_WRITE(_MMIO(offset), val);
-		mmio_hw_access_post(dev_priv);
-	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index edf6d646eb25..90bb3df0db50 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -108,12 +108,13 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
 	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
 	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
-	{RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
-	{RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
-	{RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
-	{RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
-	{RCS0, TRVADR, 0, false}, /* 0x4df0 */
-	{RCS0, TRTTE, 0, false}, /* 0x4df4 */
+	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
+	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
+	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
+	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
+	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
+	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
+	{RCS0, _MMIO(0x4dfc), 0, true},
 
 	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
@@ -392,10 +393,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (ring_id == RCS0 &&
-	    (IS_KABYLAKE(dev_priv) ||
-	     IS_BROXTON(dev_priv) ||
-	     IS_COFFEELAKE(dev_priv)))
+	if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
 		return;
 
 	if (!pre && !gen9_render_mocs.initialized)
@@ -470,11 +468,10 @@ static void switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next,
 			continue;
 		/*
 		 * No need to do save or restore of the mmio which is in context
-		 * state image on kabylake, it's initialized by lri command and
+		 * state image on gen9, it's initialized by lri command and
 		 * save or restore with context together.
 		 */
-		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
-			|| IS_COFFEELAKE(dev_priv)) && mmio->in_context)
+		if (IS_GEN(dev_priv, 9) && mmio->in_context)
 			continue;
 
 		// save
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7c99bbc3e2b8..13632dba8b2a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -298,12 +298,29 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	struct i915_request *req = workload->req;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
+	int err;
 
-	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
-		|| IS_COFFEELAKE(req->i915))
-		&& is_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
+	/*
+	 * To track whether a request has started on HW, we can emit a
+	 * breadcrumb at the beginning of the request and check its
+	 * timeline's HWSP to see if the breadcrumb has advanced past the
+	 * start of this request. Actually, the request must have the
+	 * init_breadcrumb if its timeline set has_init_bread_crumb, or the
+	 * scheduler might get a wrong state of it during reset. Since the
+	 * requests from gvt always set the has_init_breadcrumb flag, here
+	 * need to do the emit_init_breadcrumb for all the requests.
+	 */
+	if (req->engine->emit_init_breadcrumb) {
+		err = req->engine->emit_init_breadcrumb(req);
+		if (err) {
+			gvt_vgpu_err("fail to emit init breadcrumb\n");
+			return err;
+		}
+	}
+
 	/* allocate shadow ring buffer */
 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
 	if (IS_ERR(cs)) {
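
Taken together, the handlers.c and mmio_context.c hunks above stop writing the TRTT registers straight to hardware from the MMIO handlers; the values are kept only in the vGPU's virtual register file and the registers are marked in-context, so they are saved and restored with the context image instead. Below is a rough sketch of that shape, assuming simplified stand-ins for the vreg store and the engine MMIO table rather than the real GVT structures; the MMIO-space size and helper names are illustrative only.

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define NUM_VREG_DWORDS	(2 * 1024 * 1024 / 4)	/* illustrative MMIO space size */

/* Simplified stand-in for a vGPU carrying a virtual MMIO register file. */
struct vgpu {
	uint32_t vreg[NUM_VREG_DWORDS];
};

/*
 * TRTT table-entry write handler reduced to a vreg-only update, mirroring the
 * trimmed gen9_trtte_write() above: validate, store into the virtual register,
 * never touch hardware directly.
 */
static int trtte_write(struct vgpu *vgpu, unsigned int offset,
		       const void *p_data, unsigned int bytes)
{
	uint32_t trtte;

	memcpy(&trtte, p_data, sizeof(trtte));
	/* same sanity check as in gen9_trtte_write() */
	if ((trtte & 1) && (trtte & (1 << 1)) == 0)
		return -EINVAL;

	memcpy(&vgpu->vreg[offset / 4], p_data, bytes);
	return 0;
}

/*
 * Entry of an engine save/restore list. With in_context set, the host-side
 * context switch skips the register because its value lives in the context
 * image and is reloaded by LRI -- the mode the mmio_context.c hunk switches
 * the TRTT registers to.
 */
struct engine_mmio {
	unsigned int offset;
	int in_context;
};

static const struct engine_mmio trtt_mmio_list[] = {
	{ 0x4df4, 1 },	/* TRTTE */
	{ 0x4dfc, 1 },	/* the extra register the patch adds to the list */
};

int main(void)
{
	static struct vgpu vgpu;	/* static: the vreg array is large */
	uint32_t val = 0;		/* TRTT disabled */

	return trtte_write(&vgpu, trtt_mmio_list[0].offset, &val, sizeof(val));
}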