Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 1723
1 file changed, 944 insertions(+), 779 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2113f401f0ba..12ff79594bc1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -48,6 +48,11 @@
 #include <linux/reservation.h>
 #include <linux/dma-buf.h>
 
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+	return work->mmio_work.func;
+}
+
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
 	DRM_FORMAT_C8,
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int broxton_calc_cdclk(int max_pixclk);
 
-typedef struct {
-	int min, max;
-} intel_range_t;
-
-typedef struct {
-	int dot_limit;
-	int p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
 struct intel_limit {
-	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
-	intel_p2_t	    p2;
+	struct {
+		int min, max;
+	} dot, vco, n, m, m1, m2, p, p1;
+
+	struct {
+		int dot_limit;
+		int p2_slow, p2_fast;
+	} p2;
 };
 
 /* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
 static int
 intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
 {
+	/* RAWCLK_FREQ_VLV register updated from power well code */
 	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
 				      CCK_DISPLAY_REF_CLOCK_CONTROL);
 }
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
 	if (HAS_PCH_SPLIT(dev_priv))
 		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
 		return 270000;
 }
 
-static const intel_limit_t intel_limits_i8xx_dac = {
+static const struct intel_limit intel_limits_i8xx_dac = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
 		.p2_slow = 4, .p2_fast = 2 },
 };
 
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const struct intel_limit intel_limits_i8xx_dvo = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
 		.p2_slow = 4, .p2_fast = 4 },
 };
 
-static const intel_limit_t intel_limits_i8xx_lvds = {
+static const struct intel_limit intel_limits_i8xx_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
 		.p2_slow = 14, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1400000, .max = 2800000 },
 	.n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
 		.p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1400000, .max = 2800000 },
 	.n = { .min = 1, .max = 6 },
@@ -321,7 
+325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = { }; -static const intel_limit_t intel_limits_g4x_sdvo = { +static const struct intel_limit intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = { }, }; -static const intel_limit_t intel_limits_g4x_hdmi = { +static const struct intel_limit intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_g4x_single_channel_lvds = { +static const struct intel_limit intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { }, }; -static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { +static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { }, }; -static const intel_limit_t intel_limits_pineview_sdvo = { +static const struct intel_limit intel_limits_pineview_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ @@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_pineview_lvds = { +static const struct intel_limit intel_limits_pineview_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, @@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = { * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ -static const intel_limit_t intel_limits_ironlake_dac = { +static const struct intel_limit intel_limits_ironlake_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, @@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_ironlake_single_lvds = { +static const struct intel_limit intel_limits_ironlake_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds = { +static const struct intel_limit intel_limits_ironlake_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = { }; /* LVDS 100mhz refclk limits. 
*/ -static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, @@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .p2_slow = 7, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_vlv = { +static const struct intel_limit intel_limits_vlv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = { .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ }; -static const intel_limit_t intel_limits_chv = { +static const struct intel_limit intel_limits_chv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = { .p2 = { .p2_slow = 1, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_bxt = { +static const struct intel_limit intel_limits_bxt = { /* FIXME: find real dot limits */ .dot = { .min = 0, .max = INT_MAX }, .vco = { .min = 4800000, .max = 6700000 }, @@ -581,7 +585,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, * divided-down version of it. 
*/ /* m1 is reserved as 0 in Pineview, n is a ring counter */ -static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int pnv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; @@ -598,7 +602,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } -static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) +static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; @@ -610,7 +614,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot; } -static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int vlv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -622,7 +626,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot / 5; } -int chv_calc_dpll_params(int refclk, intel_clock_t *clock) +int chv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -642,8 +646,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock) */ static bool intel_PLL_is_valid(struct drm_device *dev, - const intel_limit_t *limit, - const intel_clock_t *clock) + const struct intel_limit *limit, + const struct dpll *clock) { if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); @@ -678,7 +682,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static int -i9xx_select_p2_div(const intel_limit_t *limit, +i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { @@ -713,13 +717,13 @@ i9xx_select_p2_div(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -i9xx_find_best_dpll(const intel_limit_t *limit, +i9xx_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -770,13 +774,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -pnv_find_best_dpll(const intel_limit_t *limit, +pnv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -825,13 +829,13 @@ pnv_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. 
*/ static bool -g4x_find_best_dpll(const intel_limit_t *limit, +g4x_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int max_n; bool found = false; /* approximately equals target * 0.00585 */ @@ -877,8 +881,8 @@ g4x_find_best_dpll(const intel_limit_t *limit, * best configuration and error found so far. Return the calculated error. */ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, - const intel_clock_t *calculated_clock, - const intel_clock_t *best_clock, + const struct dpll *calculated_clock, + const struct dpll *best_clock, unsigned int best_error_ppm, unsigned int *error_ppm) { @@ -918,14 +922,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -vlv_find_best_dpll(const intel_limit_t *limit, +vlv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; - intel_clock_t clock; + struct dpll clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ int max_n = min(limit->n.max, refclk / 19200); @@ -977,15 +981,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -chv_find_best_dpll(const intel_limit_t *limit, +chv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; - intel_clock_t clock; + struct dpll clock; uint64_t m2; int found = false; @@ -1035,10 +1039,10 @@ chv_find_best_dpll(const intel_limit_t *limit, } bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock) + struct dpll *best_clock) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_bxt; + const struct intel_limit *limit = &intel_limits_bxt; return chv_find_best_dpll(limit, crtc_state, target_clock, refclk, NULL, best_clock); @@ -1203,7 +1207,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (INTEL_INFO(dev_priv)->gen == 5) + if (IS_GEN5(dev_priv)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ @@ -2309,7 +2313,7 @@ err_pm: return ret; } -static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; @@ -3110,17 +3114,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -ENODEV; } -static void intel_complete_page_flips(struct drm_device *dev) +static void intel_complete_page_flips(struct drm_i915_private *dev_priv) { - struct drm_crtc *crtc; 
- - for_each_crtc(dev, crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum plane plane = intel_crtc->plane; + struct intel_crtc *crtc; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip_plane(dev, plane); - } + for_each_intel_crtc(dev_priv->dev, crtc) + intel_finish_page_flip_cs(dev_priv, crtc->pipe); } static void intel_update_primary_planes(struct drm_device *dev) @@ -3143,41 +3142,39 @@ static void intel_update_primary_planes(struct drm_device *dev) } } -void intel_prepare_reset(struct drm_device *dev) +void intel_prepare_reset(struct drm_i915_private *dev_priv) { /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(dev); + drm_modeset_lock_all(dev_priv->dev); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(dev); + intel_display_suspend(dev_priv->dev); } -void intel_finish_reset(struct drm_device *dev) +void intel_finish_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space * will get its events and not get stuck. */ - intel_complete_page_flips(dev); + intel_complete_page_flips(dev_priv); /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* * Flips in the rings have been nuked by the reset, * so update the base address of all primary @@ -3187,7 +3184,7 @@ void intel_finish_reset(struct drm_device *dev) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. 
*/ - intel_update_primary_planes(dev); + intel_update_primary_planes(dev_priv->dev); return; } @@ -3198,18 +3195,18 @@ void intel_finish_reset(struct drm_device *dev) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv->dev); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev); + intel_display_resume(dev_priv->dev); intel_hpd_init(dev_priv); - drm_modeset_unlock_all(dev); + drm_modeset_unlock_all(dev_priv->dev); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -3224,7 +3221,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) return false; spin_lock_irq(&dev->event_lock); - pending = to_intel_crtc(crtc)->unpin_work != NULL; + pending = to_intel_crtc(crtc)->flip_work != NULL; spin_unlock_irq(&dev->event_lock); return pending; @@ -3803,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) if (atomic_read(&crtc->unpin_work_count) == 0) continue; - if (crtc->unpin_work) + if (crtc->flip_work) intel_wait_for_vblank(dev, crtc->pipe); return true; @@ -3815,11 +3812,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) static void page_flip_completed(struct intel_crtc *intel_crtc) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct intel_unpin_work *work = intel_crtc->unpin_work; + struct intel_flip_work *work = intel_crtc->flip_work; - /* ensure that the unpin work is consistent wrt ->pending. */ - smp_rmb(); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; if (work->event) drm_crtc_send_vblank_event(&intel_crtc->base, work->event); @@ -3827,7 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) drm_crtc_vblank_put(&intel_crtc->base); wake_up_all(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); + queue_work(dev_priv->wq, &work->unpin_work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -3851,9 +3846,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) if (ret == 0) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + work = intel_crtc->flip_work; + if (work && !is_mmio_work(work)) { WARN_ONCE(1, "Removing stuck page flip\n"); page_flip_completed(intel_crtc); } @@ -4281,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; - DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", - intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); + DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", + intel_crtc->base.base.id, intel_crtc->base.name, + intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), @@ -4312,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, bool force_detach = !fb || !plane_state->visible; - DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", - intel_plane->base.base.id, intel_crtc->pipe, - drm_plane_index(&intel_plane->base)); + DRM_DEBUG_KMS("Updating scaler for 
[PLANE:%d:%s] scaler_user index %u.%u\n", + intel_plane->base.base.id, intel_plane->base.name, + intel_crtc->pipe, drm_plane_index(&intel_plane->base)); ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), @@ -4330,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* check colorkey */ if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { - DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", - intel_plane->base.base.id); + DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", + intel_plane->base.base.id, + intel_plane->base.name); return -EINVAL; } @@ -4350,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_VYUY: break; default: - DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, fb->base.id, fb->pixel_format); + DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", + intel_plane->base.base.id, intel_plane->base.name, + fb->base.id, fb->pixel_format); return -EINVAL; } @@ -4648,7 +4648,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) intel_pre_disable_primary(&crtc->base); } - if (pipe_config->disable_cxsr) { + if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) { crtc->wm.cxsr_allowed = false; /* @@ -5269,21 +5269,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) return max_cdclk_freq*90/100; } +static int skl_calc_cdclk(int max_pixclk, int vco); + static void intel_update_max_cdclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + int max_cdclk, vco; + + vco = dev_priv->skl_preferred_vco_freq; + WARN_ON(vco != 8100000 && vco != 8640000); + /* + * Use the lower (vco 8640) cdclk values as a + * first guess. skl_calc_cdclk() will correct it + * if the preferred vco is 8100 instead. + */ if (limit == SKL_DFSM_CDCLK_LIMIT_675) - dev_priv->max_cdclk_freq = 675000; + max_cdclk = 617143; else if (limit == SKL_DFSM_CDCLK_LIMIT_540) - dev_priv->max_cdclk_freq = 540000; + max_cdclk = 540000; else if (limit == SKL_DFSM_CDCLK_LIMIT_450) - dev_priv->max_cdclk_freq = 450000; + max_cdclk = 432000; else - dev_priv->max_cdclk_freq = 337500; + max_cdclk = 308571; + + dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROXTON(dev)) { dev_priv->max_cdclk_freq = 624000; } else if (IS_BROADWELL(dev)) { @@ -5324,264 +5337,313 @@ static void intel_update_cdclk(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", - dev_priv->cdclk_freq); + + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n", + dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco, + dev_priv->cdclk_pll.ref); + else + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", + dev_priv->cdclk_freq); /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. + * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): + * Programmng [sic] note: bit[9:2] should be programmed to the number + * of cdclk that generates 4MHz reference clock freq which is used to + * generate GMBus clock. This will vary with the cdclk freq. 
*/ - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. - */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); - } +} - if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev); +/* convert from kHz to .1 fixpoint MHz with -1MHz offset */ +static int skl_cdclk_decimal(int cdclk) +{ + return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) +static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - uint32_t divider; - uint32_t ratio; - uint32_t current_freq; - int ret; + int ratio; + + if (cdclk == dev_priv->cdclk_pll.ref) + return 0; - /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ - switch (frequency) { + switch (cdclk) { + default: + MISSING_CASE(cdclk); case 144000: + case 288000: + case 384000: + case 576000: + ratio = 60; + break; + case 624000: + ratio = 65; + break; + } + + return dev_priv->cdclk_pll.ref * ratio; +} + +static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(BXT_DE_PLL_ENABLE, 0); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) + DRM_ERROR("timeout waiting for DE PLL unlock\n"); + + dev_priv->cdclk_pll.vco = 0; +} + +static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) +{ + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref); + u32 val; + + val = I915_READ(BXT_DE_PLL_CTL); + val &= ~BXT_DE_PLL_RATIO_MASK; + val |= BXT_DE_PLL_RATIO(ratio); + I915_WRITE(BXT_DE_PLL_CTL, val); + + I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) + DRM_ERROR("timeout waiting for DE PLL lock\n"); + + dev_priv->cdclk_pll.vco = vco; +} + +static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) +{ + u32 val, divider; + int vco, ret; + + vco = bxt_de_pll_vco(dev_priv, cdclk); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); + + /* cdclk = vco / 2 / div{1,1.5,2,4} */ + switch (DIV_ROUND_CLOSEST(vco, cdclk)) { + case 8: divider = BXT_CDCLK_CD2X_DIV_SEL_4; - ratio = BXT_DE_PLL_RATIO(60); break; - case 288000: + case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; - ratio = BXT_DE_PLL_RATIO(60); break; - case 384000: + case 3: divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; - ratio = BXT_DE_PLL_RATIO(60); - break; - case 576000: - divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(60); break; - case 624000: + case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(65); - break; - case 19200: - /* - * Bypass frequency with DE PLL disabled. Init ratio, divider - * to suppress GCC warning. 
- */ - ratio = 0; - divider = 0; break; default: - DRM_ERROR("unsupported CDCLK freq %d", frequency); + WARN_ON(cdclk != dev_priv->cdclk_pll.ref); + WARN_ON(vco != 0); - return; + divider = BXT_CDCLK_CD2X_DIV_SEL_1; + break; } - mutex_lock(&dev_priv->rps.hw_lock); /* Inform power controller of upcoming frequency change */ + mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } - current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; - /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ - current_freq = current_freq * 500 + 1000; + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_disable(dev_priv); - /* - * DE PLL has to be disabled when - * - setting to 19.2MHz (bypass, PLL isn't used) - * - before setting to 624MHz (PLL needs toggling) - * - before setting to any frequency from 624MHz (PLL needs toggling) - */ - if (frequency == 19200 || frequency == 624000 || - current_freq == 624000) { - I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), - 1)) - DRM_ERROR("timout waiting for DE PLL unlock\n"); - } - - if (frequency != 19200) { - uint32_t val; - - val = I915_READ(BXT_DE_PLL_CTL); - val &= ~BXT_DE_PLL_RATIO_MASK; - val |= ratio; - I915_WRITE(BXT_DE_PLL_CTL, val); - - I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) - DRM_ERROR("timeout waiting for DE PLL lock\n"); - - val = I915_READ(CDCLK_CTL); - val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; - val |= divider; - /* - * Disable SSA Precharge when CD clock frequency < 500 MHz, - * enable otherwise. - */ - val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; - if (frequency >= 500000) - val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + if (dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_enable(dev_priv, vco); - val &= ~CDCLK_FREQ_DECIMAL_MASK; - /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ - val |= (frequency - 1000) / 500; - I915_WRITE(CDCLK_CTL, val); - } + val = divider | skl_cdclk_decimal(cdclk); + /* + * FIXME if only the cd2x divider needs changing, it could be done + * without shutting off the pipe (if only one pipe is active). + */ + val |= BXT_CDCLK_CD2X_PIPE_NONE; + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. 
+ */ + if (cdclk >= 500000) + val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + I915_WRITE(CDCLK_CTL, val); mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - DIV_ROUND_UP(frequency, 25000)); + DIV_ROUND_UP(cdclk, 25000)); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } intel_update_cdclk(dev_priv->dev); } -static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { - if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) - return false; + u32 cdctl, expected; - /* TODO: Check for a valid CDCLK rate */ + intel_update_cdclk(dev_priv->dev); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) + goto sanitize; - return false; - } + /* DPLL okay; verify the cdclock + * + * Some BIOS versions leave an incorrect decimal frequency value and + * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, + * so sanitize this register. + */ + cdctl = I915_READ(CDCLK_CTL); + /* + * Let's ignore the pipe field, since BIOS could have configured the + * dividers both synching to an active pipe, or asynchronously + * (PIPE_NONE). + */ + cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); + expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. + */ + if (dev_priv->cdclk_freq >= 500000) + expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - return false; - } + if (cdctl == expected) + /* All well; nothing to sanitize */ + return; - return true; -} +sanitize: + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) -{ - return broxton_cdclk_is_enabled(dev_priv); + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } void broxton_init_cdclk(struct drm_i915_private *dev_priv) { - /* check if cd clock is enabled */ - if (broxton_cdclk_is_enabled(dev_priv)) { - DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n"); - return; - } + bxt_sanitize_cdclk(dev_priv); - DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) + return; /* * FIXME: * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. 
- * - check if setting the max (or any) cdclk freq is really necessary - * here, it belongs to modeset time */ - broxton_set_cdclk(dev_priv, 624000); - - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + broxton_set_cdclk(dev_priv, broxton_calc_cdclk(0)); +} - udelay(10); +void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) +{ + broxton_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); +} - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout!\n"); +static int skl_calc_cdclk(int max_pixclk, int vco) +{ + if (vco == 8640000) { + if (max_pixclk > 540000) + return 617143; + else if (max_pixclk > 432000) + return 540000; + else if (max_pixclk > 308571) + return 432000; + else + return 308571; + } else { + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; + } } -void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) +static void +skl_dpll0_update(struct drm_i915_private *dev_priv) { - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + u32 val; - udelay(10); + dev_priv->cdclk_pll.ref = 24000; + dev_priv->cdclk_pll.vco = 0; - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout!\n"); + val = I915_READ(LCPLL1_CTL); + if ((val & LCPLL_PLL_ENABLE) == 0) + return; - /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ - broxton_set_cdclk(dev_priv, 19200); -} + if (WARN_ON((val & LCPLL_PLL_LOCK) == 0)) + return; -static const struct skl_cdclk_entry { - unsigned int freq; - unsigned int vco; -} skl_cdclk_frequencies[] = { - { .freq = 308570, .vco = 8640 }, - { .freq = 337500, .vco = 8100 }, - { .freq = 432000, .vco = 8640 }, - { .freq = 450000, .vco = 8100 }, - { .freq = 540000, .vco = 8100 }, - { .freq = 617140, .vco = 8640 }, - { .freq = 675000, .vco = 8100 }, -}; + val = I915_READ(DPLL_CTRL1); -static unsigned int skl_cdclk_decimal(unsigned int freq) -{ - return (freq - 1000) / 500; + if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | + DPLL_CTRL1_SSC(SKL_DPLL0) | + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) + return; + + switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8100000; + break; + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8640000; + break; + default: + MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); + break; + } } -static unsigned int skl_cdclk_get_vco(unsigned int freq) +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) { - unsigned int i; + bool changed = dev_priv->skl_preferred_vco_freq != vco; - for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { - const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; - - if (e->freq == freq) - return e->vco; - } + dev_priv->skl_preferred_vco_freq = vco; - return 8100; + if (changed) + intel_update_max_cdclk(dev_priv->dev); } static void -skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) +skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { 
- unsigned int min_freq; + int min_cdclk = skl_calc_cdclk(0, vco); u32 val; - /* select the minimum CDCLK before enabling DPLL 0 */ - val = I915_READ(CDCLK_CTL); - val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; - val |= CDCLK_FREQ_337_308; - - if (required_vco == 8640) - min_freq = 308570; - else - min_freq = 337500; - - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); + WARN_ON(vco != 8100000 && vco != 8640000); + /* select the minimum CDCLK before enabling DPLL 0 */ + val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); I915_WRITE(CDCLK_CTL, val); POSTING_READ(CDCLK_CTL); @@ -5592,14 +5654,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. * The modeset code is responsible for the selection of the exact link * rate later on, with the constraint of choosing a frequency that - * works with required_vco. + * works with vco. */ val = I915_READ(DPLL_CTRL1); val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - if (required_vco == 8640) + if (vco == 8640000) val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0); else @@ -5613,6 +5675,21 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) DRM_ERROR("DPLL0 not locked\n"); + + dev_priv->cdclk_pll.vco = vco; + + /* We'll want to keep using the current vco from now on. */ + skl_set_preferred_cdclk_vco(dev_priv, vco); +} + +static void +skl_dpll0_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); + if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) + DRM_ERROR("Couldn't disable DPLL0\n"); + + dev_priv->cdclk_pll.vco = 0; } static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) @@ -5642,12 +5719,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) return false; } -static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) +static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { struct drm_device *dev = dev_priv->dev; u32 freq_select, pcu_ack; - DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); + WARN_ON((cdclk == 24000) != (vco == 0)); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { DRM_ERROR("failed to inform PCU about cdclk change\n"); @@ -5655,7 +5734,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) } /* set CDCLK_CTL */ - switch(freq) { + switch (cdclk) { case 450000: case 432000: freq_select = CDCLK_FREQ_450_432; @@ -5665,20 +5744,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) freq_select = CDCLK_FREQ_540; pcu_ack = 2; break; - case 308570: + case 308571: case 337500: default: freq_select = CDCLK_FREQ_337_308; pcu_ack = 0; break; - case 617140: + case 617143: case 675000: freq_select = CDCLK_FREQ_675_617; pcu_ack = 3; break; } - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + skl_dpll0_disable(dev_priv); + + if (dev_priv->cdclk_pll.vco != vco) + skl_dpll0_enable(dev_priv, vco); + + I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ @@ -5689,52 +5775,41 @@ static void 
skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) intel_update_cdclk(dev); } +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv); + void skl_uninit_cdclk(struct drm_i915_private *dev_priv) { - /* disable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); - - udelay(10); - - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout\n"); - - /* disable DPLL0 */ - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) - DRM_ERROR("Couldn't disable DPLL0\n"); + skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0); } void skl_init_cdclk(struct drm_i915_private *dev_priv) { - unsigned int required_vco; - - /* DPLL0 not enabled (happens on early BIOS versions) */ - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { - /* enable DPLL0 */ - required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); - skl_dpll0_enable(dev_priv, required_vco); - } + int cdclk, vco; - /* set CDCLK to the frequency the BIOS chose */ - skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); + skl_sanitize_cdclk(dev_priv); - /* enable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) { + /* + * Use the current vco as our initial + * guess as to what the preferred vco is. + */ + if (dev_priv->skl_preferred_vco_freq == 0) + skl_set_preferred_cdclk_vco(dev_priv, + dev_priv->cdclk_pll.vco); + return; + } - udelay(10); + vco = dev_priv->skl_preferred_vco_freq; + if (vco == 0) + vco = 8100000; + cdclk = skl_calc_cdclk(0, vco); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout\n"); + skl_set_cdclk(dev_priv, cdclk, vco); } -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - int freq = dev_priv->skl_boot_cdclk; + uint32_t cdctl, expected; /* * check if the pre-os intialized the display @@ -5744,8 +5819,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; + intel_update_cdclk(dev_priv->dev); /* Is PLL enabled and locked ? */ - if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) goto sanitize; /* DPLL okay; verify the cdclock @@ -5754,19 +5831,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. 
*/ - if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) + cdctl = I915_READ(CDCLK_CTL); + expected = (cdctl & CDCLK_FREQ_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + if (cdctl == expected) /* All well; nothing to sanitize */ - return false; + return; + sanitize: - /* - * As of now initialize with max cdclk till - * we get dynamic cdclk support - * */ - dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq; - skl_init_cdclk(dev_priv); + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - /* we did have to sanitize */ - return true; + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } /* Adjust CDclk dividers to allow high res or save power if possible */ @@ -5906,21 +5984,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, return 200000; } -static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, - int max_pixclk) +static int broxton_calc_cdclk(int max_pixclk) { - /* - * FIXME: - * - remove the guardband, it's not needed on BXT - * - set 19.2MHz bypass frequency if there are no active pipes - */ - if (max_pixclk > 576000*9/10) + if (max_pixclk > 576000) return 624000; - else if (max_pixclk > 384000*9/10) + else if (max_pixclk > 384000) return 576000; - else if (max_pixclk > 288000*9/10) + else if (max_pixclk > 288000) return 384000; - else if (max_pixclk > 144000*9/10) + else if (max_pixclk > 144000) return 288000; else return 144000; @@ -5963,9 +6035,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); @@ -5977,20 +6046,15 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int max_pixclk = intel_mode_max_pixclk(dev, state); + int max_pixclk = ilk_max_pixel_rate(state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = - broxton_calc_cdclk(dev_priv, max_pixclk); + broxton_calc_cdclk(max_pixclk); if (!intel_state->active_crtcs) - intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); + intel_state->dev_cdclk = broxton_calc_cdclk(0); return 0; } @@ -6252,7 +6316,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) return; if (to_intel_plane_state(crtc->primary->state)->visible) { - WARN_ON(intel_crtc->unpin_work); + WARN_ON(intel_crtc->flip_work); intel_pre_disable_primary_noatomic(crtc); @@ -6262,8 +6326,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) dev_priv->display.crtc_disable(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", + crtc->base.id, crtc->name); WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); crtc->state->active = false; @@ -6563,10 +6627,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + int clock_limit = dev_priv->max_dotclk_freq; 
- /* FIXME should check pixel clock limits on all platforms */ if (INTEL_INFO(dev)->gen < 4) { - int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; + clock_limit = dev_priv->max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock @@ -6574,16 +6638,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, */ if (intel_crtc_supports_double_wide(crtc) && adjusted_mode->crtc_clock > clock_limit) { - clock_limit *= 2; + clock_limit = dev_priv->max_dotclk_freq; pipe_config->double_wide = true; } + } - if (adjusted_mode->crtc_clock > clock_limit) { - DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", - adjusted_mode->crtc_clock, clock_limit, - yesno(pipe_config->double_wide)); - return -EINVAL; - } + if (adjusted_mode->crtc_clock > clock_limit) { + DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", + adjusted_mode->crtc_clock, clock_limit, + yesno(pipe_config->double_wide)); + return -EINVAL; } /* @@ -6615,76 +6679,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, static int skylake_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t linkrate; + uint32_t cdctl; - if (!(lcpll1 & LCPLL_PLL_ENABLE)) - return 24000; /* 24MHz is the cd freq with NSSC ref */ + skl_dpll0_update(dev_priv); - if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) - return 540000; + if (dev_priv->cdclk_pll.vco == 0) + return dev_priv->cdclk_pll.ref; - linkrate = (I915_READ(DPLL_CTRL1) & - DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; + cdctl = I915_READ(CDCLK_CTL); - if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || - linkrate == DPLL_CTRL1_LINK_RATE_1080) { - /* vco 8640 */ + if (dev_priv->cdclk_pll.vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 432000; case CDCLK_FREQ_337_308: - return 308570; + return 308571; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: - return 617140; + return 617143; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } else { - /* vco 8100 */ switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 450000; case CDCLK_FREQ_337_308: return 337500; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: return 675000; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } - /* error case, do as if DPLL0 isn't enabled */ - return 24000; + return dev_priv->cdclk_pll.ref; +} + +static void bxt_de_pll_update(struct drm_i915_private *dev_priv) +{ + u32 val; + + dev_priv->cdclk_pll.ref = 19200; + dev_priv->cdclk_pll.vco = 0; + + val = I915_READ(BXT_DE_PLL_ENABLE); + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) + return; + + if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) + return; + + val = I915_READ(BXT_DE_PLL_CTL); + dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) * + dev_priv->cdclk_pll.ref; } static int broxton_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); - int cdclk; + u32 divider; + int div, vco; - if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) - return 19200; + bxt_de_pll_update(dev_priv); - cdclk = 19200 * pll_ratio / 2; + vco = dev_priv->cdclk_pll.vco; + if (vco 
== 0) + return dev_priv->cdclk_pll.ref; + + divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { + switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: - return cdclk; /* 576MHz or 624MHz */ + div = 2; + break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - return cdclk * 2 / 3; /* 384MHz */ + div = 3; + break; case BXT_CDCLK_CD2X_DIV_SEL_2: - return cdclk / 2; /* 288MHz */ + div = 4; + break; case BXT_CDCLK_CD2X_DIV_SEL_4: - return cdclk / 4; /* 144MHz */ + div = 8; + break; + default: + MISSING_CASE(divider); + return dev_priv->cdclk_pll.ref; } - /* error case, do as if DE PLL isn't enabled */ - return 19200; + return DIV_ROUND_CLOSEST(vco, div); } static int broadwell_get_display_clock_speed(struct drm_device *dev) @@ -7063,7 +7149,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) static void i9xx_update_pll_dividers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; u32 fp, fp2 = 0; @@ -7487,7 +7573,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) static void i9xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7563,7 +7649,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, static void i8xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7817,7 +7903,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 48000; memset(&crtc_state->dpll_hw_state, 0, @@ -7853,7 +7939,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7896,7 +7982,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7930,7 +8016,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7963,7 +8049,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_chv; + const struct intel_limit *limit = &intel_limits_chv; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -7984,7 +8070,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_vlv; + const struct intel_limit *limit = &intel_limits_vlv; memset(&crtc_state->dpll_hw_state, 0, 
sizeof(crtc_state->dpll_hw_state)); @@ -8034,7 +8120,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; - intel_clock_t clock; + struct dpll clock; u32 mdiv; int refclk = 100000; @@ -8131,7 +8217,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; enum dpio_channel port = vlv_pipe_to_channel(pipe); - intel_clock_t clock; + struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; @@ -8275,12 +8361,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; + int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; + bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(dev, encoder) { @@ -8307,8 +8395,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) can_ssc = true; } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", - has_panel, has_lvds, has_ck505); + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + u32 temp = I915_READ(PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. 
This is only under driver's control after @@ -8328,9 +8430,12 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) else final |= DREF_NONSPREAD_SOURCE_ENABLE; - final &= ~DREF_SSC_SOURCE_MASK; final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - final &= ~DREF_SSC1_ENABLE; + + if (!using_ssc_source) { + final &= ~DREF_SSC_SOURCE_MASK; + final &= ~DREF_SSC1_ENABLE; + } if (has_panel) { final |= DREF_SSC_SOURCE_ENABLE; @@ -8393,7 +8498,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling SSC entirely\n"); + DRM_DEBUG_KMS("Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -8404,16 +8509,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; + if (!using_ssc_source) { + DRM_DEBUG_KMS("Disabling SSC source\n"); - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } } BUG_ON(val != final); @@ -8794,7 +8903,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; @@ -8902,10 +9011,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - intel_clock_t reduced_clock; + struct dpll reduced_clock; bool has_reduced_clock = false; struct intel_shared_dpll *pll; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 120000; memset(&crtc_state->dpll_hw_state, 0, @@ -9300,6 +9409,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. + */ pll_id = (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); @@ -9687,6 +9800,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) cdclk, dev_priv->cdclk_freq); } +static int broadwell_calc_cdclk(int max_pixclk) +{ + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; +} + static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); @@ -9698,14 +9823,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) * FIXME should also account for plane ratio * once 64bpp pixel formats are supported. 
*/ - if (max_pixclk > 540000) - cdclk = 675000; - else if (max_pixclk > 450000) - cdclk = 540000; - else if (max_pixclk > 337500) - cdclk = 450000; - else - cdclk = 337500; + cdclk = broadwell_calc_cdclk(max_pixclk); if (cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", @@ -9715,7 +9833,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) intel_state->cdclk = intel_state->dev_cdclk = cdclk; if (!intel_state->active_crtcs) - intel_state->dev_cdclk = 337500; + intel_state->dev_cdclk = broadwell_calc_cdclk(0); return 0; } @@ -9730,6 +9848,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) broadwell_set_cdclk(dev, req_cdclk); } +static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->dev); + const int max_pixclk = ilk_max_pixel_rate(state); + int vco = intel_state->cdclk_pll_vco; + int cdclk; + + /* + * FIXME should also account for plane ratio + * once 64bpp pixel formats are supported. + */ + cdclk = skl_calc_cdclk(max_pixclk, vco); + + /* + * FIXME move the cdclk calculation to + * compute_config() so we can fail gracefully. + */ + if (cdclk > dev_priv->max_cdclk_freq) { + DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", + cdclk, dev_priv->max_cdclk_freq); + cdclk = dev_priv->max_cdclk_freq; + } + + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = skl_calc_cdclk(0, vco); + + return 0; +} + +static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) +{ + struct drm_i915_private *dev_priv = to_i915(old_state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state); + unsigned int req_cdclk = intel_state->dev_cdclk; + unsigned int req_vco = intel_state->cdclk_pll_vco; + + skl_set_cdclk(dev_priv, req_cdclk, req_vco); +} + static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { @@ -9850,6 +10009,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, enum intel_display_power_domain power_domain; u32 tmp; + /* + * The pipe->transcoder mapping is fixed with the exception of the eDP + * transcoder handled below.
+ */ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; /* @@ -10317,10 +10480,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - obj = i915_gem_alloc_object(dev, + obj = i915_gem_object_create(dev, intel_framebuffer_size_for_mode(mode, bpp)); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + if (IS_ERR(obj)) + return ERR_CAST(obj); mode_cmd.width = mode->hdisplay; mode_cmd.height = mode->vdisplay; @@ -10632,7 +10795,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, int pipe = pipe_config->cpu_transcoder; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; - intel_clock_t clock; + struct dpll clock; int port_clock; int refclk = i9xx_pll_refclk(dev, pipe_config); @@ -10806,31 +10969,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -void intel_mark_busy(struct drm_device *dev) +void intel_mark_busy(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.busy) return; intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) gen6_rps_busy(dev_priv); dev_priv->mm.busy = true; } -void intel_mark_idle(struct drm_device *dev) +void intel_mark_idle(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->mm.busy) return; dev_priv->mm.busy = false; - if (INTEL_INFO(dev)->gen >= 6) - gen6_rps_idle(dev->dev_private); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_idle(dev_priv); intel_runtime_pm_put(dev_priv); } @@ -10839,15 +10998,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = intel_crtc->unpin_work; - intel_crtc->unpin_work = NULL; + work = intel_crtc->flip_work; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); if (work) { - cancel_work_sync(&work->work); + cancel_work_sync(&work->mmio_work); + cancel_work_sync(&work->unpin_work); kfree(work); } @@ -10858,12 +11018,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) static void intel_unpin_work_fn(struct work_struct *__work) { - struct intel_unpin_work *work = - container_of(__work, struct intel_unpin_work, work); + struct intel_flip_work *work = + container_of(__work, struct intel_flip_work, unpin_work); struct intel_crtc *crtc = to_intel_crtc(work->crtc); struct drm_device *dev = crtc->base.dev; struct drm_plane *primary = crtc->base.primary; + if (is_mmio_work(work)) + flush_work(&work->mmio_work); + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb, primary->state->rotation); drm_gem_object_unreference(&work->pending_flip_obj->base); @@ -10882,60 +11045,14 @@ static void intel_unpin_work_fn(struct work_struct *__work) kfree(work); } -static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; - unsigned long flags; - - /* Ignore early vblank irqs */ - if (intel_crtc == NULL) - return; - - /* - * This is called both by irq handlers and the reset code (to complete - * lost pageflips) so needs the full irqsave spinlocks. - */ - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - - /* Ensure we don't miss a work->pending update ... 
*/ - smp_rmb(); - - if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } - - page_flip_completed(intel_crtc); - - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -void intel_finish_page_flip(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - - do_intel_finish_page_flip(dev, crtc); -} - -void intel_finish_page_flip_plane(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - - do_intel_finish_page_flip(dev, crtc); -} - /* Is 'a' after or equal to 'b'? */ static bool g4x_flip_count_after_eq(u32 a, u32 b) { return !((a - b) & 0x80000000); } -static bool page_flip_finished(struct intel_crtc *crtc) +static bool __pageflip_finished_cs(struct intel_crtc *crtc, + struct intel_flip_work *work) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -10977,40 +11094,103 @@ static bool page_flip_finished(struct intel_crtc *crtc) * anyway, we don't really care. */ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == - crtc->unpin_work->gtt_offset && + crtc->flip_work->gtt_offset && g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), - crtc->unpin_work->flip_count); + crtc->flip_work->flip_count); } -void intel_prepare_page_flip(struct drm_device *dev, int plane) +static bool +__pageflip_finished_mmio(struct intel_crtc *crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); + /* + * MMIO work completes when vblank is different from + * flip_queued_vblank. + * + * Reset counter value doesn't matter, this is handled by + * i915_wait_request finishing early, so no need to handle + * reset here. + */ + return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; +} + + +static bool pageflip_finished(struct intel_crtc *crtc, + struct intel_flip_work *work) +{ + if (!atomic_read(&work->pending)) + return false; + + smp_rmb(); + + if (is_mmio_work(work)) + return __pageflip_finished_mmio(crtc, work); + else + return __pageflip_finished_cs(crtc, work); +} + +void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; + unsigned long flags; + + /* Ignore early vblank irqs */ + if (!crtc) + return; + + /* + * This is called both by irq handlers and the reset code (to complete + * lost pageflips) so needs the full irqsave spinlocks. 
+ */ + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->flip_work; + + if (work != NULL && + !is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; unsigned long flags; + /* Ignore early vblank irqs */ + if (!crtc) + return; /* * This is called both by irq handlers and the reset code (to complete * lost pageflips) so needs the full irqsave spinlocks. - * - * NB: An MMIO update of the plane base pointer will also - * generate a page-flip completion irq, i.e. every modeset - * is also accompanied by a spurious intel_prepare_page_flip(). */ spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) - atomic_inc_not_zero(&intel_crtc->unpin_work->pending); + work = intel_crtc->flip_work; + + if (work != NULL && + is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + spin_unlock_irqrestore(&dev->event_lock, flags); } -static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) +static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, + struct intel_flip_work *work) { + work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); + /* Ensure that the work item is consistent when activating it ... */ - smp_wmb(); - atomic_set(&work->pending, INTEL_FLIP_PENDING); - /* and that it is marked active as soon as the irq could fire. */ - smp_wmb(); + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); } static int intel_gen2_queue_flip(struct drm_device *dev, @@ -11041,10 +11221,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, 0); /* aux display base address, unused */ - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11073,10 +11252,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, MI_NOOP); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11104,7 +11282,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -11115,7 +11293,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11139,7 +11316,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, 
intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11151,7 +11328,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11243,10 +11419,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, (MI_NOOP)); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11264,7 +11439,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, if (engine == NULL) return true; - if (INTEL_INFO(engine->dev)->gen < 5) + if (INTEL_GEN(engine->i915) < 5) return false; if (i915.use_mmio_flip < 0) @@ -11283,7 +11458,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, unsigned int rotation, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -11335,7 +11510,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, } static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -11358,48 +11533,20 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, POSTING_READ(DSPSURF(intel_crtc->plane)); } -/* - * XXX: This is the temporary way to update the plane registers until we get - * around to using the usual plane update functions for MMIO flips - */ -static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) -{ - struct intel_crtc *crtc = mmio_flip->crtc; - struct intel_unpin_work *work; - - spin_lock_irq(&crtc->base.dev->event_lock); - work = crtc->unpin_work; - spin_unlock_irq(&crtc->base.dev->event_lock); - if (work == NULL) - return; - - intel_mark_page_flip_active(work); - - intel_pipe_update_start(crtc); - - if (INTEL_INFO(mmio_flip->i915)->gen >= 9) - skl_do_mmio_flip(crtc, mmio_flip->rotation, work); - else - /* use_mmio_flip() retricts MMIO flips to ilk+ */ - ilk_do_mmio_flip(crtc, work); - - intel_pipe_update_end(crtc); -} - -static void intel_mmio_flip_work_func(struct work_struct *work) +static void intel_mmio_flip_work_func(struct work_struct *w) { - struct intel_mmio_flip *mmio_flip = - container_of(work, struct intel_mmio_flip, work); + struct intel_flip_work *work = + container_of(w, struct intel_flip_work, mmio_work); + struct intel_crtc *crtc = to_intel_crtc(work->crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_framebuffer *intel_fb = - to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); + to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; - if (mmio_flip->req) { - WARN_ON(__i915_wait_request(mmio_flip->req, + if (work->flip_queued_req) + 
WARN_ON(__i915_wait_request(work->flip_queued_req, false, NULL, - &mmio_flip->i915->rps.mmioflips)); - i915_gem_request_unreference__unlocked(mmio_flip->req); - } + &dev_priv->rps.mmioflips)); /* For framebuffer backed by dmabuf, wait for fence */ if (obj->base.dma_buf) @@ -11407,29 +11554,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work) false, false, MAX_SCHEDULE_TIMEOUT) < 0); - intel_do_mmio_flip(mmio_flip); - kfree(mmio_flip); -} - -static int intel_queue_mmio_flip(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_i915_gem_object *obj) -{ - struct intel_mmio_flip *mmio_flip; - - mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL); - if (mmio_flip == NULL) - return -ENOMEM; - - mmio_flip->i915 = to_i915(dev); - mmio_flip->req = i915_gem_request_reference(obj->last_write_req); - mmio_flip->crtc = to_intel_crtc(crtc); - mmio_flip->rotation = crtc->primary->state->rotation; + intel_pipe_update_start(crtc); - INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); - schedule_work(&mmio_flip->work); + if (INTEL_GEN(dev_priv) >= 9) + skl_do_mmio_flip(crtc, work->rotation, work); + else + /* use_mmio_flip() restricts MMIO flips to ilk+ */ + ilk_do_mmio_flip(crtc, work); - return 0; + intel_pipe_update_end(crtc, work); } static int intel_default_queue_flip(struct drm_device *dev, @@ -11442,37 +11575,32 @@ static int intel_default_queue_flip(struct drm_device *dev, return -ENODEV; } -static bool __intel_pageflip_stall_check(struct drm_device *dev, - struct drm_crtc *crtc) +static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work = intel_crtc->unpin_work; - u32 addr; - - if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) - return true; + u32 addr, vblank; - if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) + if (!atomic_read(&work->pending)) return false; - if (!work->enable_stall_check) - return false; + smp_rmb(); + vblank = intel_crtc_get_vblank_counter(intel_crtc); if (work->flip_ready_vblank == 0) { if (work->flip_queued_req && !i915_gem_request_completed(work->flip_queued_req, true)) return false; - work->flip_ready_vblank = drm_crtc_vblank_count(crtc); + work->flip_ready_vblank = vblank; } - if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) + if (vblank - work->flip_ready_vblank < 3) return false; /* Potential stall - if we see that the flip has happened, * assume a missed interrupt.
*/ - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); else addr = I915_READ(DSPADDR(intel_crtc->plane)); @@ -11484,12 +11612,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, return addr == work->gtt_offset; } -void intel_check_page_flip(struct drm_device *dev, int pipe) +void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; + struct intel_flip_work *work; WARN_ON(!in_interrupt()); @@ -11497,16 +11625,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) return; spin_lock(&dev->event_lock); - work = intel_crtc->unpin_work; - if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { - WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", - work->flip_queued_vblank, drm_vblank_count(dev, pipe)); + work = intel_crtc->flip_work; + + if (work != NULL && !is_mmio_work(work) && + __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) { + WARN_ONCE(1, + "Kicking stuck page flip: queued at %d, now %d\n", + work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc)); page_flip_completed(intel_crtc); work = NULL; } - if (work != NULL && - drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) - intel_queue_rps_boost_for_request(dev, work->flip_queued_req); + + if (work != NULL && !is_mmio_work(work) && + intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1) + intel_queue_rps_boost_for_request(work->flip_queued_req); spin_unlock(&dev->event_lock); } @@ -11522,7 +11654,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *primary = crtc->primary; enum pipe pipe = intel_crtc->pipe; - struct intel_unpin_work *work; + struct intel_flip_work *work; struct intel_engine_cs *engine; bool mmio_flip; struct drm_i915_gem_request *request = NULL; @@ -11559,19 +11691,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; work->old_fb = old_fb; - INIT_WORK(&work->work, intel_unpin_work_fn); + INIT_WORK(&work->unpin_work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); if (ret) goto free_work; - /* We borrow the event spin lock for protecting unpin_work */ + /* We borrow the event spin lock for protecting flip_work */ spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + if (intel_crtc->flip_work) { /* Before declaring the flip queue wedged, check if * the hardware completed the operation behind our backs. 
*/ - if (__intel_pageflip_stall_check(dev, crtc)) { + if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) { DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); page_flip_completed(intel_crtc); } else { @@ -11583,7 +11715,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -EBUSY; } } - intel_crtc->unpin_work = work; + intel_crtc->flip_work = work; spin_unlock_irq(&dev->event_lock); if (atomic_read(&intel_crtc->unpin_work_count) >= 2) @@ -11638,6 +11770,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, */ if (!mmio_flip) { ret = i915_gem_object_sync(obj, engine, &request); + if (!ret && !request) { + request = i915_gem_request_alloc(engine, NULL); + ret = PTR_ERR_OR_ZERO(request); + } + if (ret) goto cleanup_pending; } @@ -11649,38 +11786,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj, 0); work->gtt_offset += intel_crtc->dspaddr_offset; + work->rotation = crtc->primary->state->rotation; if (mmio_flip) { - ret = intel_queue_mmio_flip(dev, crtc, obj); - if (ret) - goto cleanup_unpin; + INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); i915_gem_request_assign(&work->flip_queued_req, obj->last_write_req); - } else { - if (!request) { - request = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(request)) { - ret = PTR_ERR(request); - goto cleanup_unpin; - } - } + schedule_work(&work->mmio_work); + } else { + i915_gem_request_assign(&work->flip_queued_req, request); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, page_flip_flags); if (ret) goto cleanup_unpin; - i915_gem_request_assign(&work->flip_queued_req, request); - } + intel_mark_page_flip_active(intel_crtc, work); - if (request) i915_add_request_no_flush(request); + } - work->flip_queued_vblank = drm_crtc_vblank_count(crtc); - work->enable_stall_check = true; - - i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, + i915_gem_track_fb(intel_fb_obj(old_fb), obj, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); @@ -11706,7 +11833,7 @@ cleanup: drm_framebuffer_unreference(work->old_fb); spin_lock_irq(&dev->event_lock); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); @@ -11808,12 +11935,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); - int idx = intel_crtc->base.base.id, ret; bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = crtc->state->active; bool is_crtc_enabled = crtc_state->active; bool turn_off, turn_on, visible, was_visible; struct drm_framebuffer *fb = plane_state->fb; + int ret; if (crtc_state && INTEL_INFO(dev)->gen >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { @@ -11834,6 +11961,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * Visibility is calculated as if the crtc was on, but * after scaler setup everything depends on it being off * when the crtc isn't active. + * + * FIXME this is wrong for watermarks. Watermarks should also + * be computed as if the pipe would be active. Perhaps move + * per-plane wm computation to the .check_plane() hook, and + * only combine the results from all planes in the current place? 
*/ if (!is_crtc_enabled) to_intel_plane_state(plane_state)->visible = visible = false; @@ -11847,11 +11979,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, - plane->base.id, fb ? fb->base.id : -1); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", + intel_crtc->base.base.id, + intel_crtc->base.name, + plane->base.id, plane->name, + fb ? fb->base.id : -1); - DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", - plane->base.id, was_visible, visible, + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", + plane->base.id, plane->name, + was_visible, visible, turn_off, turn_on, mode_changed); if (turn_on) { @@ -12007,7 +12143,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } } else if (dev_priv->display.compute_intermediate_wm) { if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) - pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; + pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } if (INTEL_INFO(dev)->gen >= 9) { @@ -12142,7 +12278,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_plane_state *state; struct drm_framebuffer *fb; - DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", + crtc->base.base.id, crtc->base.name, context, pipe_config, pipe_name(crtc->pipe)); DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); @@ -12243,29 +12380,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, state = to_intel_plane_state(plane->state); fb = state->base.fb; if (!fb) { - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " - "disabled, scaler_id = %d\n", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1, - drm_plane_index(plane), state->scaler_id); + DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", + plane->base.id, plane->name, state->scaler_id); continue; } - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - crtc->base.primary == plane ? 
0 : intel_plane->plane + 1, - drm_plane_index(plane)); - DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", - fb->base.id, fb->width, fb->height, fb->pixel_format); - DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", - state->scaler_id, - state->src.x1 >> 16, state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), drm_rect_height(&state->dst)); + DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", + plane->base.id, plane->name); + DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", + fb->base.id, fb->width, fb->height, + drm_get_format_name(fb->pixel_format)); + DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", + state->scaler_id, + state->src.x1 >> 16, state->src.y1 >> 16, + drm_rect_width(&state->src) >> 16, + drm_rect_height(&state->src) >> 16, + state->dst.x1, state->dst.y1, + drm_rect_width(&state->dst), + drm_rect_height(&state->dst)); } } @@ -12932,7 +13064,7 @@ verify_crtc_state(struct drm_crtc *crtc, pipe_config->base.crtc = crtc; pipe_config->base.state = old_state; - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); @@ -13280,6 +13412,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state) intel_state->active_crtcs |= 1 << i; else intel_state->active_crtcs &= ~(1 << i); + + if (crtc_state->active != crtc->state->active) + intel_state->active_pipe_changes |= drm_crtc_mask(crtc); } /* @@ -13290,9 +13425,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. */ if (dev_priv->display.modeset_calc_cdclk) { + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco; + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq; + ret = dev_priv->display.modeset_calc_cdclk(state); + if (ret < 0) + return ret; - if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) + if (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) ret = intel_modeset_all_pipes(state); if (ret < 0) @@ -13316,38 +13459,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * phase. The code here should be run after the per-crtc and per-plane 'check' * handlers to ensure that all derived state has been updated. */ -static void calc_watermark_data(struct drm_atomic_state *state) +static int calc_watermark_data(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; - struct drm_plane *plane; - struct drm_plane_state *pstate; - - /* - * Calculate watermark configuration details now that derived - * plane/crtc state is all properly updated. - */ - drm_for_each_crtc(crtc, dev) { - cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?: - crtc->state; - - if (cstate->active) - intel_state->wm_config.num_pipes_active++; - } - drm_for_each_legacy_plane(plane, dev) { - pstate = drm_atomic_get_existing_plane_state(state, plane) ?: - plane->state; + struct drm_i915_private *dev_priv = to_i915(dev); - if (!to_intel_plane_state(pstate)->visible) - continue; + /* Is there platform-specific watermark information to calculate? 
*/ + if (dev_priv->display.compute_global_watermarks) + return dev_priv->display.compute_global_watermarks(state); - intel_state->wm_config.sprites_enabled = true; - if (pstate->crtc_w != pstate->src_w >> 16 || - pstate->crtc_h != pstate->src_h >> 16) - intel_state->wm_config.sprites_scaled = true; - } + return 0; } /** @@ -13377,14 +13498,13 @@ static int intel_atomic_check(struct drm_device *dev, if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) crtc_state->mode_changed = true; - if (!crtc_state->enable) { - if (needs_modeset(crtc_state)) - any_ms = true; + if (!needs_modeset(crtc_state)) continue; - } - if (!needs_modeset(crtc_state)) + if (!crtc_state->enable) { + any_ms = true; continue; + } /* FIXME: For only active_changed we shouldn't need to do any * state recomputation at all. */ @@ -13394,8 +13514,11 @@ static int intel_atomic_check(struct drm_device *dev, return ret; ret = intel_modeset_pipe_config(crtc, pipe_config); - if (ret) + if (ret) { + intel_dump_pipe_config(to_intel_crtc(crtc), + pipe_config, "[failed]"); return ret; + } if (i915.fastboot && intel_pipe_config_compare(dev, @@ -13405,13 +13528,12 @@ static int intel_atomic_check(struct drm_device *dev, to_intel_crtc_state(crtc_state)->update_pipe = true; } - if (needs_modeset(crtc_state)) { + if (needs_modeset(crtc_state)) any_ms = true; - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - return ret; - } + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + return ret; intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, needs_modeset(crtc_state) ? @@ -13431,9 +13553,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; intel_fbc_choose_crtc(dev_priv, state); - calc_watermark_data(state); - - return 0; + return calc_watermark_data(state); } static int intel_atomic_prepare_commit(struct drm_device *dev, @@ -13495,6 +13615,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, return ret; } +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + + if (!dev->max_vblank_count) + return drm_accurate_vblank_count(&crtc->base); + + return dev->driver->get_vblank_counter(dev, crtc->pipe); +} + static void intel_atomic_wait_for_vblanks(struct drm_device *dev, struct drm_i915_private *dev_priv, unsigned crtc_mask) @@ -13597,7 +13727,8 @@ static int intel_atomic_commit(struct drm_device *dev, } drm_atomic_helper_swap_state(dev, state); - dev_priv->wm.config = intel_state->wm_config; + dev_priv->wm.distrust_bios_wm = false; + dev_priv->wm.skl_results = intel_state->wm_results; intel_shared_dpll_commit(state); if (intel_state->modeset) { @@ -13653,7 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_update_legacy_modeset_state(state->dev, state); if (dev_priv->display.modeset_commit_cdclk && - intel_state->dev_cdclk != dev_priv->cdclk_freq) + (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) dev_priv->display.modeset_commit_cdclk(state); intel_modeset_verify_disabled(dev); @@ -13749,8 +13881,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc) state = drm_atomic_state_alloc(dev); if (!state) { - DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory", + crtc->base.id, crtc->name); return; } @@ -14006,7 +14138,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, { struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); - intel_pipe_update_end(intel_crtc); + intel_pipe_update_end(intel_crtc, NULL); } /** @@ -14018,9 +14150,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, */ void intel_plane_destroy(struct drm_plane *plane) { - struct intel_plane *intel_plane = to_intel_plane(plane); + if (!plane) + return; + drm_plane_cleanup(plane); - kfree(intel_plane); + kfree(to_intel_plane(plane)); } const struct drm_plane_funcs intel_plane_funcs = { @@ -14092,10 +14226,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->disable_plane = i9xx_disable_primary_plane; } - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, - intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane 1%c", pipe_name(pipe)); + else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "primary %c", pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane %c", plane_name(primary->plane)); if (ret) goto fail; @@ -14253,7 +14401,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, &intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR, NULL); + DRM_PLANE_TYPE_CURSOR, + "cursor %c", pipe_name(pipe)); if (ret) goto fail; @@ -14338,7 +14487,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) goto fail; ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, - cursor, &intel_crtc_funcs, NULL); + cursor, &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); if (ret) goto fail; @@ -14372,10 +14522,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) return; fail: - if (primary) - drm_plane_cleanup(primary); - if (cursor) - drm_plane_cleanup(cursor); + intel_plane_destroy(primary); + intel_plane_destroy(cursor); kfree(crtc_state); kfree(intel_crtc); } @@ -14554,6 +14702,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + bool has_edp, has_port; + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. However since eDP doesn't require DDC @@ -14562,27 +14712,37 @@ static void intel_setup_outputs(struct drm_device *dev) * Thus we can't rely on the DP_DETECTED bit alone to detect * eDP ports. Consult the VBT as well as DP_DETECTED to * detect eDP ports. + * + * Sadly the straps seem to be missing sometimes even for HDMI + * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap + * and VBT for the presence of the port. Additionally we can't + * trust the port type the VBT declares as we've seen at least + * HDMI ports that the VBT claim are DP or eDP. 
*/ - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_B)) + has_edp = intel_dp_is_edp(dev, PORT_B); + has_port = intel_bios_is_port_present(dev_priv, PORT_B); + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); + if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - if (I915_READ(VLV_DP_B) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_B)) - intel_dp_init(dev, VLV_DP_B, PORT_B); - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_C)) + has_edp = intel_dp_is_edp(dev, PORT_C); + has_port = intel_bios_is_port_present(dev_priv, PORT_C); + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); + if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIC, PORT_C); - if (I915_READ(VLV_DP_C) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_C)) - intel_dp_init(dev, VLV_DP_C, PORT_C); if (IS_CHERRYVIEW(dev)) { - /* eDP not supported on port D, so don't check VBT */ - if (I915_READ(CHV_HDMID) & SDVO_DETECTED) - intel_hdmi_init(dev, CHV_HDMID, PORT_D); - if (I915_READ(CHV_DP_D) & DP_DETECTED) + /* + * eDP not supported on port D, + * so no need to worry about it + */ + has_port = intel_bios_is_port_present(dev_priv, PORT_D); + if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) intel_dp_init(dev, CHV_DP_D, PORT_D); + if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) + intel_hdmi_init(dev, CHV_HDMID, PORT_D); } intel_dsi_init(dev); @@ -15050,12 +15210,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - if (IS_BROADWELL(dev_priv)) { - dev_priv->display.modeset_commit_cdclk = - broadwell_modeset_commit_cdclk; - dev_priv->display.modeset_calc_cdclk = - broadwell_modeset_calc_cdclk; - } + } + + if (IS_BROADWELL(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + broadwell_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + broadwell_modeset_calc_cdclk; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->display.modeset_commit_cdclk = valleyview_modeset_commit_cdclk; @@ -15066,6 +15227,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) broxton_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = broxton_modeset_calc_cdclk; + } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + skl_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + skl_modeset_calc_cdclk; } switch (INTEL_INFO(dev_priv)->gen) { @@ -15293,7 +15459,7 @@ void intel_modeset_init_hw(struct drm_device *dev) dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); } /* @@ -15363,7 +15529,6 @@ retry: } /* Write calculated watermark values back */ - to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; for_each_crtc_in_state(state, crtc, cstate, i) { struct intel_crtc_state *cs = to_intel_crtc_state(cstate); @@ -15461,11 +15626,13 @@ void intel_modeset_init(struct drm_device *dev) } intel_update_czclk(dev_priv); - intel_update_rawclk(dev_priv); intel_update_cdclk(dev); intel_shared_dpll_init(dev); + if (dev_priv->max_cdclk_freq == 0) + intel_update_max_cdclk(dev); + /* Just 
disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); @@ -15606,8 +15773,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { bool plane; - DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", - crtc->base.base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", + crtc->base.base.id, crtc->base.name); /* Pipe has the wrong plane attached and the plane is active. * Temporarily change the plane mapping and disable everything @@ -15775,26 +15942,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active) { dev_priv->active_crtcs |= 1 << crtc->pipe; - if (IS_BROADWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) pixclk = ilk_pipe_pixel_rate(crtc_state); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); - } else if (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_BROXTON(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) pixclk = crtc_state->base.adjusted_mode.crtc_clock; else WARN_ON(dev_priv->display.modeset_calc_cdclk); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); } dev_priv->min_pixclk[crtc->pipe] = pixclk; readout_plane_state(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", - crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", + crtc->base.base.id, crtc->base.name, crtc->active ? "enabled" : "disabled"); } @@ -16025,15 +16190,16 @@ retry: void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; - intel_init_gt_powersave(dev); + intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); - intel_setup_overlay(dev); + intel_setup_overlay(dev_priv); /* * Make sure any fbs we allocated at startup are properly @@ -16076,7 +16242,7 @@ void intel_modeset_cleanup(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *connector; - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); intel_backlight_unregister(dev); @@ -16106,9 +16272,9 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_mode_config_cleanup(dev); - intel_cleanup_overlay(dev); + intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev); + intel_cleanup_gt_powersave(dev_priv); intel_teardown_gmbus(dev); } @@ -16204,9 +16370,8 @@ struct intel_display_error_state { }; struct intel_display_error_state * -intel_display_capture_error_state(struct drm_device *dev) +intel_display_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_display_error_state *error; int transcoders[] = { TRANSCODER_A, @@ -16216,14 +16381,14 @@ intel_display_capture_error_state(struct drm_device *dev) }; int i; - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(dev_priv, i) { @@ -16239,25 +16404,25 @@ intel_display_capture_error_state(struct 
drm_device *dev) error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos = I915_READ(DSPPOS(i)); } - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) error->plane[i].addr = I915_READ(DSPADDR(i)); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].source = I915_READ(PIPESRC(i)); - if (HAS_GMCH_DISPLAY(dev)) + if (HAS_GMCH_DISPLAY(dev_priv)) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev)->num_pipes; + error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; if (HAS_DDI(dev_priv)) error->num_transcoders++; /* Account for eDP. */
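
The flip-completion rework in this patch leans on wraparound-safe counter arithmetic: g4x_flip_count_after_eq() treats 'a' as at-or-after 'b' whenever the unsigned difference has its sign bit clear, and __pageflip_stall_check_cs() applies the same idea to vblank counters (vblank - work->flip_ready_vblank < 3). The following is a minimal standalone sketch, not part of the patch, showing the comparison in plain C:

        #include <stdio.h>
        #include <stdint.h>

        /* Same shape as g4x_flip_count_after_eq(): is 'a' at or after 'b'? */
        static int counter_after_eq(uint32_t a, uint32_t b)
        {
                return !((uint32_t)(a - b) & 0x80000000u);
        }

        int main(void)
        {
                /* Ordinary case: 105 comes after 100. */
                printf("%d\n", counter_after_eq(105, 100));           /* 1 */
                /* Wrap case: the counter went 0xfffffffe -> ... -> 3. */
                printf("%d\n", counter_after_eq(3, 0xfffffffeu));     /* 1 */
                /* Reversed order is rejected even across the wrap. */
                printf("%d\n", counter_after_eq(0xfffffffeu, 3));     /* 0 */
                return 0;
        }

The unsigned subtraction keeps the delta small across a 32-bit wrap, which is also what keeps the "vblank - work->flip_ready_vblank < 3" stall test meaningful when the hardware vblank counter rolls over.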