author     Dave Airlie <airlied@redhat.com>    2018-03-23 06:19:27 +1000
committer  Dave Airlie <airlied@redhat.com>    2018-03-23 06:19:27 +1000
commit     b4eec0fa537165efc3265cdbb4bac06e6bdaf596 (patch)
tree       1ab3efa433a3e3fd82ce24e958bf4984a3aa3b16
parent     2a2553cc45c889f15a1df0355891a809f17ca43d (diff)
parent     8c5cb3c1c5701df8b8bcefb151547148378841e5 (diff)
Merge tag 'drm-intel-next-fixes-2018-03-22' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
A GVT fix for a regression that caused guest VM GPU hangs.
A fix for race conditions in declaring the GPU wedged (hit in CI).
* tag 'drm-intel-next-fixes-2018-03-22' of git://anongit.freedesktop.org/drm/drm-intel:
drm/i915/gvt: force to set all context control bits from guest
drm/i915/gvt: Update PDPs after a vGPU mm object is pinned.
drm/i915/gvt: Invalidate vGPU PPGTT mm objects during a vGPU reset.
drm/i915/kvmgt: Handle kzalloc failure
drm/i915/gvt: fix spelling mistake: "destoried" -> "destroyed"
drm/i915/gvt: Remove reduntant printing of untracked mmio
drm/i915/pmu: Work around compiler warnings on some kernel configs
drm/i915: Only call tasklet_kill() on the first prepare_reset
drm/i915: Wrap engine->schedule in RCU locks for set-wedge protection
drm/i915/icl: do not save DDI A/E sharing bit for ICL
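The set-wedge fixes in this pull rely on a simple RCU pattern: callers may invoke engine->schedule only inside an RCU read-side section, while i915_gem_set_wedged() clears the pointer and then uses synchronize_rcu() to wait out any caller still running the old callback. Below is a minimal userspace sketch of that pattern, using liburcu rather than the kernel's RCU API; the names are illustrative, not the i915 ones. Build with -lurcu.

#include <stdio.h>
#include <urcu.h>

static void (*schedule_cb)(int prio);   /* stands in for engine->schedule */

static void real_schedule(int prio)
{
        printf("scheduling at priority %d\n", prio);
}

/* Reader side: mirrors the __i915_request_add()/__fence_set_priority() hunks. */
static void submit(int prio)
{
        rcu_read_lock();
        if (schedule_cb)                /* callback may be retired concurrently */
                schedule_cb(prio);
        rcu_read_unlock();
}

/* Writer side: mirrors i915_gem_set_wedged() retiring the callback. */
static void wedge(void)
{
        schedule_cb = NULL;             /* unpublish the callback... */
        synchronize_rcu();              /* ...and wait out any reader still inside it */
}

int main(void)
{
        rcu_register_thread();          /* liburcu readers must register */
        schedule_cb = real_schedule;
        submit(1);
        wedge();
        submit(2);                      /* silently skipped: callback retired */
        rcu_unregister_thread();
        return 0;
}

The point of the design is that the writer never has to lock every submission path; it only has to make the pointer disappear and wait one grace period before tearing down what the callback touches.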
-rw-r--r--   drivers/gpu/drm/i915/gvt/gtt.c        |  24
-rw-r--r--   drivers/gpu/drm/i915/gvt/gtt.h        |   1
-rw-r--r--   drivers/gpu/drm/i915/gvt/handlers.c   |   9
-rw-r--r--   drivers/gpu/drm/i915/gvt/kvmgt.c      |  22
-rw-r--r--   drivers/gpu/drm/i915/gvt/scheduler.c  |  37
-rw-r--r--   drivers/gpu/drm/i915/gvt/vgpu.c       |   1
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c       |  23
-rw-r--r--   drivers/gpu/drm/i915/i915_pmu.c       |  32
-rw-r--r--   drivers/gpu/drm/i915/i915_request.c   |   2
-rw-r--r--   drivers/gpu/drm/i915/intel_ddi.c      |   9
10 files changed, 119 insertions(+), 41 deletions(-)
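One detail worth noting before the diff: the context-control fix below introduces COPY_REG_MASKED, which ORs 0xffff << 16 into the copied register value. Many GEN registers are "masked": the upper 16 bits are a write-enable mask for the lower 16, so a write only changes the bits whose mask bit is set. Forcing the whole upper half on makes every guest-provided control bit take effect in the shadow context. A small standalone sketch of that write convention, with illustrative names (this is not the i915 macro itself):

#include <stdint.h>
#include <stdio.h>

/* Masked-register write convention: bits [31:16] are a write-enable
 * mask for bits [15:0]; only enabled bits change. */
static uint16_t masked_write(uint16_t cur, uint32_t wr)
{
        uint16_t mask = wr >> 16;
        uint16_t val = wr & 0xffff;

        return (cur & ~mask) | (val & mask);
}

int main(void)
{
        uint16_t reg = 0x0f0f;

        /* Write 0xf0f0 with no mask bits set: register unchanged. */
        printf("0x%04x\n", masked_write(reg, 0x0000f0f0));
        /* Same write with all mask bits set (the 0xffff << 16 trick):
         * every bit of the new value takes effect. */
        printf("0x%04x\n", masked_write(reg, (0xffffu << 16) | 0xf0f0));
        return 0;
}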
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 0a100a288e6d..d29281231507 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2046,7 +2046,7 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 	}
 
 	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
-		gvt_err("vgpu ppgtt mm is not fully destoried\n");
+		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
 
 	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
 		gvt_err("Why we still has spt not freed?\n");
@@ -2291,6 +2291,28 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 }
 
 /**
+ * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
+ * @vgpu: a vGPU
+ *
+ * This function is called when invalidate all PPGTT instances of a vGPU.
+ *
+ */
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_mm *mm;
+
+	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+		if (mm->type == INTEL_GVT_MM_PPGTT) {
+			list_del_init(&mm->ppgtt_mm.lru_list);
+			if (mm->ppgtt_mm.shadowed)
+				invalidate_ppgtt_mm(mm);
+		}
+	}
+}
+
+/**
  * intel_vgpu_reset_ggtt - reset the GGTT entry
  * @vgpu: a vGPU
  *
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index e831507e17c3..a8b369cd352b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -194,6 +194,7 @@ struct intel_vgpu_gtt {
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 112f2ec7c25f..8c5d5d005854 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1767,6 +1767,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(CURBASE(PIPE_B), D_ALL);
 	MMIO_D(CURBASE(PIPE_C), D_ALL);
 
+	MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
+	MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
+	MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
+
 	MMIO_D(_MMIO(0x700ac), D_ALL);
 	MMIO_D(_MMIO(0x710ac), D_ALL);
 	MMIO_D(_MMIO(0x720ac), D_ALL);
@@ -2228,6 +2232,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
 	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+	MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
 
 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
@@ -2559,6 +2564,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(WM_MISC, D_BDW);
 	MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
 
+	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
 
@@ -2787,6 +2793,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
@@ -2801,7 +2808,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 
+	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
 
 	MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
 	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 8a428678e4b5..520fe3d0a882 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -184,7 +184,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 	return NULL;
 }
 
-static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 		dma_addr_t dma_addr)
 {
 	struct gvt_dma *new, *itr;
@@ -192,7 +192,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 
 	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
 	if (!new)
-		return;
+		return -ENOMEM;
 
 	new->vgpu = vgpu;
 	new->gfn = gfn;
@@ -229,6 +229,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
 
 	vgpu->vdev.nr_cache_entries++;
+	return 0;
 }
 
 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
@@ -1586,11 +1587,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
 	if (!entry) {
 		ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
-		if (ret) {
-			mutex_unlock(&info->vgpu->vdev.cache_lock);
-			return ret;
-		}
-		__gvt_cache_add(info->vgpu, gfn, *dma_addr);
+		if (ret)
+			goto err_unlock;
+
+		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+		if (ret)
+			goto err_unmap;
 	} else {
 		kref_get(&entry->ref);
 		*dma_addr = entry->dma_addr;
@@ -1598,6 +1600,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
 	mutex_unlock(&info->vgpu->vdev.cache_lock);
 	return 0;
+
+err_unmap:
+	gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+err_unlock:
+	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	return ret;
 }
 
 static void __gvt_dma_release(struct kref *ref)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9b92b4e25a20..a55b4975c154 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	if (WARN_ON(!workload->shadow_mm))
+		return;
+
+	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+		return;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap(page);
+	set_context_pdp_root_pointer(shadow_ring_context,
+			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+	kunmap(page);
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -101,8 +124,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+#define COPY_REG_MASKED(name) {\
+		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+					      + RING_CTX_OFF(name.val),\
+					      &shadow_ring_context->name.val, 4);\
+		shadow_ring_context->name.val |= 0xffff << 16;\
+	}
 
-	COPY_REG(ctx_ctrl);
+	COPY_REG_MASKED(ctx_ctrl);
 	COPY_REG(ctx_timestamp);
 
 	if (ring_id == RCS) {
@@ -111,9 +140,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		COPY_REG(rcs_indirect_ctx_offset);
 	}
 #undef COPY_REG
-
-	set_context_pdp_root_pointer(shadow_ring_context,
-			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+#undef COPY_REG_MASKED
 
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
@@ -509,6 +536,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return ret;
 	}
 
+	update_shadow_pdps(workload);
+
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
 		gvt_vgpu_err("fail to vgpu sync oos pages\n");
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41f76e86aa1f..2e0a02a80fe4 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -522,6 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
 		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+		intel_vgpu_invalidate_ppgtt(vgpu);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5bd07338b46..0359d6f870b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -471,10 +471,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 	rq = to_request(fence);
 	engine = rq->engine;
-	if (!engine->schedule)
-		return;
 
-	engine->schedule(rq, prio);
+	rcu_read_lock();
+	if (engine->schedule)
+		engine->schedule(rq, prio);
+	rcu_read_unlock();
 }
 
 static void fence_set_priority(struct dma_fence *fence, int prio)
 {
@@ -2939,8 +2940,16 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 	 * calling engine->init_hw() and also writing the ELSP.
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
+	 *
+	 * Note that this needs to be a single atomic operation on the
+	 * tasklet (flush existing tasks, prevent new tasks) to prevent
+	 * a race between reset and set-wedged. It is not, so we do the best
+	 * we can atm and make sure we don't lock the machine up in the more
+	 * common case of recursively being called from set-wedged from inside
+	 * i915_reset.
 	 */
-	tasklet_kill(&engine->execlists.tasklet);
+	if (!atomic_read(&engine->execlists.tasklet.count))
+		tasklet_kill(&engine->execlists.tasklet);
 	tasklet_disable(&engine->execlists.tasklet);
 
 	/*
@@ -3214,8 +3223,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 */
 	for_each_engine(engine, i915, id) {
 		i915_gem_reset_prepare_engine(engine);
+
 		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3233,11 +3245,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * start to complete all requests.
 		 */
 		engine->submit_request = nop_complete_submit_request;
-		engine->schedule = NULL;
 	}
 
-	i915->caps.scheduler = 0;
-
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 964467b03e4d..d8feb9053e0c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -433,7 +433,7 @@ static u64 __get_rc6(struct drm_i915_private *i915)
 	return val;
 }
 
-static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+static u64 get_rc6(struct drm_i915_private *i915)
 {
 #if IS_ENABLED(CONFIG_PM)
 	unsigned long flags;
@@ -449,8 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		 * previously.
 		 */
 
-		if (!locked)
-			spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock_irqsave(&i915->pmu.lock, flags);
 
 		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
 			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
@@ -459,12 +458,10 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
 		}
 
-		if (!locked)
-			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	} else {
 		struct pci_dev *pdev = i915->drm.pdev;
 		struct device *kdev = &pdev->dev;
-		unsigned long flags2;
 
 		/*
 		 * We are runtime suspended.
@@ -473,10 +470,8 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		 * on top of the last known real value, as the approximated RC6
 		 * counter value.
 		 */
-		if (!locked)
-			spin_lock_irqsave(&i915->pmu.lock, flags);
-
-		spin_lock_irqsave(&kdev->power.lock, flags2);
+		spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock(&kdev->power.lock);
 
 		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
 			i915->pmu.suspended_jiffies_last =
@@ -486,14 +481,13 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 				i915->pmu.suspended_jiffies_last;
 		val += jiffies - kdev->power.accounting_timestamp;
 
-		spin_unlock_irqrestore(&kdev->power.lock, flags2);
+		spin_unlock(&kdev->power.lock);
 
 		val = jiffies_to_nsecs(val);
 		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 
 		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-		if (!locked)
-			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 
 	return val;
@@ -502,7 +496,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 #endif
 }
 
-static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
+static u64 __i915_pmu_event_read(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
@@ -540,7 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 			val = count_interrupts(i915);
 			break;
 		case I915_PMU_RC6_RESIDENCY:
-			val = get_rc6(i915, locked);
+			val = get_rc6(i915);
 			break;
 		}
 	}
@@ -555,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
 
 again:
 	prev = local64_read(&hwc->prev_count);
-	new = __i915_pmu_event_read(event, false);
+	new = __i915_pmu_event_read(event);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
 		goto again;
@@ -605,14 +599,14 @@ static void i915_pmu_enable(struct perf_event *event)
 		engine->pmu.enable_count[sample]++;
 	}
 
+	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+
 	/*
 	 * Store the current counter value so we can report the correct delta
 	 * for all listeners. Even when the event was already enabled and has
 	 * an existing non-zero value.
 	 */
-	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
-
-	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
 }
 
 static void i915_pmu_disable(struct perf_event *event)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d437beac3969..282f57630cc1 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1081,8 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
+	rcu_read_lock();
 	if (engine->schedule)
 		engine->schedule(request, request->ctx->priority);
+	rcu_read_unlock();
 
 	local_bh_disable();
 	i915_sw_fence_commit(&request->submit);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ac8fc2a44ac6..dbcf1a0586f9 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3080,9 +3080,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
 
-	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
-					  (DDI_BUF_PORT_REVERSAL |
-					   DDI_A_4_LANES);
+	if (INTEL_GEN(dev_priv) >= 11)
+		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+			DDI_BUF_PORT_REVERSAL;
+	else
+		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
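The kvmgt kzalloc fix above is a textbook example of the kernel's goto-based error unwinding: each failure point jumps to a label that releases exactly what was acquired so far, in reverse order, so the DMA mapping is undone and the lock dropped on the cache-add failure path. A self-contained userspace sketch of the same shape; malloc/calloc and a pthread mutex stand in for gvt_dma_map_page(), __gvt_cache_add() and cache_lock, and all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Illustrative stand-in for kvmgt_dma_map_guest_page(). */
static int map_guest_page(void **dma, void **entry)
{
        int ret;

        pthread_mutex_lock(&cache_lock);

        *dma = malloc(64);              /* stands in for gvt_dma_map_page() */
        if (!*dma) {
                ret = -1;
                goto err_unlock;
        }

        *entry = calloc(1, 32);         /* stands in for __gvt_cache_add() */
        if (!*entry) {
                ret = -1;
                goto err_unmap;         /* undo the mapping, then the lock */
        }

        pthread_mutex_unlock(&cache_lock);
        return 0;

err_unmap:
        free(*dma);                     /* release in reverse acquisition order */
err_unlock:
        pthread_mutex_unlock(&cache_lock);
        return ret;
}

int main(void)
{
        void *dma, *entry;

        if (map_guest_page(&dma, &entry) == 0) {
                printf("mapped and cached\n");
                free(entry);
                free(dma);
        }
        return 0;
}

The labels fall through intentionally: jumping to err_unmap also executes err_unlock, which is what guarantees the lock is never leaked no matter where the function bails out.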