author     Dave Airlie <airlied@redhat.com>    2017-11-02 11:33:57 +1000
committer  Dave Airlie <airlied@redhat.com>    2017-11-02 11:33:57 +1000
commit     96ffbbf936298cad0df289ed1c09052493095b7f (patch)
tree       47548e12527c8843c7b4d3e4158a7ce7350af7e7
parent     25dd1aa3b4cf0c361147aa45ff4dd1d335259ac1 (diff)
parent     bb5cf3386327c9cb5ca3fbb85242e940751649c8 (diff)
Merge tag 'drm-intel-fixes-2017-11-01' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
Fixes for Stable:
- Fix KBL Blank Screen (Jani)
- Fix FIFO Underrun on SNB (Maarten)
Other fixes:
- Fix GPU Hang on i915gm (Chris)
- Fix gem_tiled_pread_pwrite IGT case (Chris)
- Cancel modeset retry work during modeset clean-up (Manasi)
* tag 'drm-intel-fixes-2017-11-01' of git://anongit.freedesktop.org/drm/drm-intel:
drm/i915: Check incoming alignment for unfenced buffers (on i915gm)
drm/i915: Hold rcu_read_lock when iterating over the radixtree (vma idr)
drm/i915: Hold rcu_read_lock when iterating over the radixtree (objects)
drm/i915/edp: read edp display control registers unconditionally
drm/i915: Do not rely on wm preservation for ILK watermarks
drm/i915: Cancel the modeset retry work during modeset cleanup
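
[Editor's note, not part of the pull request: the two "Hold rcu_read_lock" patches in the shortlog above apply the same general pattern, wrapping a lockless radix_tree_for_each_slot() walk in an RCU read-side critical section so that tree nodes freed concurrently cannot be reused while the walk is in flight. A minimal, self-contained sketch of that pattern follows; it is hypothetical module code with illustrative names, not the i915 driver itself, and writer-side locking is assumed to be handled by the caller as it is in i915.

/* rcu_radix_sketch.c - illustrative only, not part of this merge */
#include <linux/module.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static RADIX_TREE(demo_tree, GFP_KERNEL);	/* hypothetical tree */
static int demo_payload[2];			/* dummy entries */

static void demo_reset_tree(void)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	/*
	 * Writers are assumed to be serialized by the caller (i915 relies on
	 * its own locking for that); rcu_read_lock() only keeps the lockless
	 * walk safe against tree nodes being freed and reused mid-iteration.
	 */
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &demo_tree, &iter, 0)
		radix_tree_delete(&demo_tree, iter.index);
	rcu_read_unlock();
}

static int __init demo_init(void)
{
	/* Error handling elided for brevity. */
	radix_tree_insert(&demo_tree, 0, &demo_payload[0]);
	radix_tree_insert(&demo_tree, 1, &demo_payload[1]);
	demo_reset_tree();
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
]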
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             | 51
7 files changed, 57 insertions, 35 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 32e857dc507c..dc1faa49687d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2214,8 +2214,10 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	struct radix_tree_iter iter;
 	void __rcu **slot;
 
+	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+	rcu_read_unlock();
 }
 
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 58a2a44f88bd..8afd2ce59b8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -104,6 +104,7 @@ static void lut_close(struct i915_gem_context *ctx)
 		kmem_cache_free(ctx->i915->luts, lut);
 	}
 
+	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 		struct i915_vma *vma = rcu_dereference_raw(*slot);
 		struct drm_i915_gem_object *obj = vma->obj;
@@ -115,6 +116,7 @@ static void lut_close(struct i915_gem_context *ctx)
 
 		__i915_gem_object_release_unless_active(obj);
 	}
+	rcu_read_unlock();
 }
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 92437f455b43..4ac454ae54d7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -337,6 +337,10 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
 	    (vma->node.start + vma->node.size - 1) >> 32)
 		return true;
 
+	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
+	    !i915_vma_is_map_and_fenceable(vma))
+		return true;
+
 	return false;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5c7828c52d12..5ebdb63330dd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15227,6 +15227,23 @@ void intel_connector_unregister(struct drm_connector *connector)
 	intel_panel_destroy_backlight(connector);
 }
 
+static void intel_hpd_poll_fini(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+
+	/* First disable polling... */
+	drm_kms_helper_poll_fini(dev);
+
+	/* Then kill the work that may have been queued by hpd. */
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		if (connector->modeset_retry_work.func)
+			cancel_work_sync(&connector->modeset_retry_work);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+}
+
 void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15247,7 +15264,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
 	 * poll handlers. Hence disable polling after hpd handling is shut down.
 	 */
-	drm_kms_helper_poll_fini(dev);
+	intel_hpd_poll_fini(dev);
 
 	/* poll work can call into fbdev, hence clean that up afterwards */
 	intel_fbdev_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 203198659ab2..09f274419eea 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3731,9 +3731,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	}
 
-	/* Read the eDP Display control capabilities registers */
-	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+	/*
+	 * Read the eDP display control registers.
+	 *
+	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
+	 * method). The display control registers should read zero if they're
+	 * not supported anyway.
+	 */
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
 			     sizeof(intel_dp->edp_dpcd))
 		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
 			      intel_dp->edp_dpcd);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fa47285918f4..79fbaf78f604 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -496,7 +496,6 @@ struct intel_crtc_scaler_state {
 
 struct intel_pipe_wm {
 	struct intel_wm_level wm[5];
-	struct intel_wm_level raw_wm[5];
 	uint32_t linetime;
 	bool fbc_wm_enabled;
 	bool pipe_enabled;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0a09f8ff6aff..cb950752c346 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2716,9 +2716,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 const struct intel_crtc *intel_crtc,
 				 int level,
 				 struct intel_crtc_state *cstate,
-				 struct intel_plane_state *pristate,
-				 struct intel_plane_state *sprstate,
-				 struct intel_plane_state *curstate,
+				 const struct intel_plane_state *pristate,
+				 const struct intel_plane_state *sprstate,
+				 const struct intel_plane_state *curstate,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -3038,28 +3038,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_plane *intel_plane;
-	struct intel_plane_state *pristate = NULL;
-	struct intel_plane_state *sprstate = NULL;
-	struct intel_plane_state *curstate = NULL;
+	struct drm_plane *plane;
+	const struct drm_plane_state *plane_state;
+	const struct intel_plane_state *pristate = NULL;
+	const struct intel_plane_state *sprstate = NULL;
+	const struct intel_plane_state *curstate = NULL;
 	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
 
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		struct intel_plane_state *ps;
-
-		ps = intel_atomic_get_existing_plane_state(state,
-							   intel_plane);
-		if (!ps)
-			continue;
+	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
 
-		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 			pristate = ps;
-		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+		else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 			sprstate = ps;
-		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			curstate = ps;
 	}
 
@@ -3081,11 +3077,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	if (pipe_wm->sprites_scaled)
 		usable_level = 0;
 
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
-			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -3095,8 +3089,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 
 	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
 
-	for (level = 1; level <= max_level; level++) {
-		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+	for (level = 1; level <= usable_level; level++) {
+		struct intel_wm_level *wm = &pipe_wm->wm[level];
 
 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
 				     pristate, sprstate, curstate, wm);
@@ -3106,13 +3100,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 		 * register maximums since such watermarks are
 		 * always invalid.
 		 */
-		if (level > usable_level)
-			continue;
-
-		if (ilk_validate_wm_level(level, &max, wm))
-			pipe_wm->wm[level] = *wm;
-		else
-			usable_level = level;
+		if (!ilk_validate_wm_level(level, &max, wm)) {
+			memset(wm, 0, sizeof(*wm));
+			break;
+		}
 	}
 
 	return 0;
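
[Editor's note, not part of the pull request: the intel_display.c change above follows the usual teardown ordering for deferred work: first stop whatever can queue new work (here, connector polling via drm_kms_helper_poll_fini()), then cancel_work_sync() anything already queued. A minimal sketch of that ordering follows; names and structure are hypothetical, not the driver code.

/* hpd_fini_sketch.c - illustrative only, not part of this merge */
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_connector {
	struct work_struct modeset_retry_work;
	bool shutting_down;
};

static struct demo_connector demo;

static void demo_retry_work(struct work_struct *work)
{
	struct demo_connector *c =
		container_of(work, struct demo_connector, modeset_retry_work);

	/* Must not re-arm itself once teardown has started. */
	if (READ_ONCE(c->shutting_down))
		return;
}

static void demo_fini(struct demo_connector *c)
{
	/* 1) Stop the source of new work first... */
	WRITE_ONCE(c->shutting_down, true);

	/* 2) ...then cancel and wait for anything already queued, so no
	 *    work item can run after its backing state is freed. */
	cancel_work_sync(&c->modeset_retry_work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.modeset_retry_work, demo_retry_work);
	schedule_work(&demo.modeset_retry_work);
	demo_fini(&demo);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
]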