Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 78
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 151
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 100
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 66
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c | 55
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_csr.c | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 132
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 320
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 365
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 33
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 205
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 46
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 252
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 74
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 26
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.h | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 321
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 52
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 145
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 55
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 329
-rw-r--r-- drivers/gpu/drm/i915/gt/gen2_engine_cs.h | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_engine_cs.c | 455
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_engine_cs.h | 39
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_sseu.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 69
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 25
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 46
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 57
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_renderstate.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 866
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 91
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 53
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 101
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 132
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 85
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 909
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_mocs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rc6.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.c | 68
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_timeline.c | 25
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/shaders/README | 46
-rw-r--r-- drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm | 119
-rw-r--r-- drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm | 117
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/edid.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 55
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs_params.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 79
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 98
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 71
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 42
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 381
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_region_lmem.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 39
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 10
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf_selftests.h | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 871
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 18
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_region.c | 1
145 files changed, 5657 insertions(+), 3118 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0da6ea6e3f1..41a27fd5dbc7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -78,6 +78,8 @@ gt-y += \
gt/debugfs_engines.o \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
+ gt/gen2_engine_cs.o \
+ gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 4fec5bd64920..8c55f5bee9ab 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1469,8 +1469,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
- pipe_config->hw.adjusted_mode.private_flags |=
- I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+ pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@@ -1558,10 +1557,6 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
- /* We would not operate in periodic command mode */
- pipe_config->hw.adjusted_mode.private_flags &=
- ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
-
/*
* In case of TE GATE cmd mode, we
* receive TE from the slave if
@@ -1569,14 +1564,14 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
*/
if (is_cmd_mode(intel_dsi)) {
if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
- pipe_config->hw.adjusted_mode.private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE1 |
I915_MODE_FLAG_DSI_USE_TE0;
else if (intel_dsi->ports == BIT(PORT_B))
- pipe_config->hw.adjusted_mode.private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE1;
else
- pipe_config->hw.adjusted_mode.private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE0;
}
@@ -1954,6 +1949,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
return;
err:
+ drm_connector_cleanup(connector);
drm_encoder_cleanup(&encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index d043057d2fa0..630f49b7aa01 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -249,9 +249,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_post = false;
crtc_state->fifo_changed = false;
crtc_state->preload_luts = false;
+ crtc_state->inherited = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
+ crtc_state->dsb = NULL;
return &crtc_state->uapi;
}
@@ -292,6 +294,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb);
+
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
kfree(crtc_state);
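Taken together, these two hunks pin down the lifetime of the new crtc_state->dsb pointer: it is never carried over on state duplication and must already be released by the time a state is destroyed. A hedged sketch of the implied lifecycle (inferred from these hunks; the allocation and release steps live in intel_dsb.c, which is not shown here):

/* Assumed crtc_state->dsb lifecycle across one atomic commit:
 *   intel_crtc_duplicate_state(): dsb = NULL    (never inherited)
 *   commit prepare (elsewhere):   dsb allocated by the DSB code
 *   intel_dsb_commit(crtc_state): batch executed, dsb released
 *   intel_crtc_destroy_state():   drm_WARN_ON(dsb), must be gone
 */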
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 839124647202..6593e2c38043 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -479,7 +479,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915_modparams.vbt_sdvo_panel_type;
+ index = dev_priv->params.vbt_sdvo_panel_type;
if (index == -2) {
drm_dbg_kms(&dev_priv->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
@@ -829,9 +829,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
u8 vswing;
/* Don't read from VBT if the module parameter has a valid value */
- if (i915_modparams.edp_vswing) {
+ if (dev_priv->params.edp_vswing) {
dev_priv->vbt.edp.low_vswing =
- i915_modparams.edp_vswing == 1;
+ dev_priv->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->vbt.edp.low_vswing = vswing == 0;
@@ -1619,30 +1619,18 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
-static enum port dvo_port_to_port(u8 dvo_port)
+static enum port __dvo_port_to_port(int n_ports, int n_dvo,
+ const int port_mapping[][3], u8 dvo_port)
{
- /*
- * Each DDI port can have more than one value on the "DVO Port" field,
- * so look for all the possible values for each port.
- */
- static const int dvo_ports[][3] = {
- [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
- [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
- [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
- [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
- [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
- [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
- [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
- };
enum port port;
int i;
- for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
- for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
- if (dvo_ports[port][i] == -1)
+ for (port = PORT_A; port < n_ports; port++) {
+ for (i = 0; i < n_dvo; i++) {
+ if (port_mapping[port][i] == -1)
break;
- if (dvo_port == dvo_ports[port][i])
+ if (dvo_port == port_mapping[port][i])
return port;
}
}
@@ -1650,6 +1638,48 @@ static enum port dvo_port_to_port(u8 dvo_port)
return PORT_NONE;
}
+static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
+ u8 dvo_port)
+{
+ /*
+ * Each DDI port can have more than one value on the "DVO Port" field,
+ * so look for all the possible values for each port.
+ */
+ static const int port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT },
+ [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+ };
+ /*
+ * Bspec lists the ports as A, B, C, D - however internally in our
+ * driver we keep them as PORT_A, PORT_B, PORT_D and PORT_E so the
+ * registers in Display Engine match the right offsets. Apply the
+ * mapping here to translate from VBT to internal convention.
+ */
+ static const int rkl_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { -1 },
+ [PORT_D] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_E] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ };
+
+ if (IS_ROCKETLAKE(dev_priv))
+ return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
+ ARRAY_SIZE(rkl_port_mapping[0]),
+ rkl_port_mapping,
+ dvo_port);
+ else
+ return __dvo_port_to_port(ARRAY_SIZE(port_mapping),
+ ARRAY_SIZE(port_mapping[0]),
+ port_mapping,
+ dvo_port);
+}
+
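The RKL table above remaps the VBT's DVO codes for ports C and D onto the driver's PORT_D and PORT_E. A self-contained sketch of the reverse lookup, using simplified stand-in enum values rather than the driver's real DVO_PORT_* definitions:

#include <stddef.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_NONE };

/* Hypothetical stand-ins for the VBT DVO port codes. */
enum { DVO_HDMIA, DVO_DPA, DVO_HDMIB, DVO_DPB,
       DVO_HDMIC, DVO_DPC, DVO_HDMID, DVO_DPD };

static const int rkl_map[][3] = {
	[PORT_A] = { DVO_HDMIA, DVO_DPA, -1 },
	[PORT_B] = { DVO_HDMIB, DVO_DPB, -1 },
	[PORT_C] = { -1 },			/* no PORT_C on RKL */
	[PORT_D] = { DVO_HDMIC, DVO_DPC, -1 },	/* VBT "C" -> PORT_D */
	[PORT_E] = { DVO_HDMID, DVO_DPD, -1 },	/* VBT "D" -> PORT_E */
};

static enum port lookup(int dvo)
{
	size_t p, i;

	for (p = PORT_A; p < sizeof(rkl_map) / sizeof(rkl_map[0]); p++) {
		for (i = 0; i < 3; i++) {
			if (rkl_map[p][i] == -1)
				break;	/* -1 terminates each row early */
			if (rkl_map[p][i] == dvo)
				return (enum port)p;
		}
	}
	return PORT_NONE;
}

/* lookup(DVO_HDMIC) returns PORT_D on RKL, not PORT_C. */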
static void parse_ddi_port(struct drm_i915_private *dev_priv,
struct display_device_data *devdata,
u8 bdb_version)
@@ -1659,7 +1689,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
- port = dvo_port_to_port(child->dvo_port);
+ port = dvo_port_to_port(dev_priv, child->dvo_port);
if (port == PORT_NONE)
return;
@@ -2603,10 +2633,10 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
- aux_ch = AUX_CH_C;
+ aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_D : AUX_CH_C;
break;
case DP_AUX_D:
- aux_ch = AUX_CH_D;
+ aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_E : AUX_CH_D;
break;
case DP_AUX_E:
aux_ch = AUX_CH_E;
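These two cases mirror the same RKL shift on the AUX side: the VBT still describes the channels as C and D, but the wired AUX channels are D and E. The mapping assumed from the changed cases:

/* RKL AUX remap:
 *   VBT DP_AUX_C -> AUX_CH_D,  VBT DP_AUX_D -> AUX_CH_E;
 *   other platforms keep the identity mapping (C -> C, D -> D).
 */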
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index fef04e2d954e..bd060404d249 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,12 +5,12 @@
#include <drm/drm_atomic_state_helper.h>
+#include "intel_atomic.h"
#include "intel_bw.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
-#include "intel_sideband.h"
-#include "intel_atomic.h"
#include "intel_pm.h"
-
+#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -199,6 +199,12 @@ static const struct intel_sa_info tgl_sa_info = {
.displayrtids = 256,
};
+static const struct intel_sa_info rkl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 20, /* GB/s */
+ .displayrtids = 128,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -309,7 +315,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_GEN(dev_priv, 12))
+ if (IS_ROCKETLAKE(dev_priv))
+ icl_get_bw_info(dev_priv, &rkl_sa_info);
+ else if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
else if (IS_GEN(dev_priv, 11))
icl_get_bw_info(dev_priv, &icl_sa_info);
@@ -420,6 +428,141 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int max_bw = 0;
+ int slice_id;
+ enum pipe pipe;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ enum plane_id plane_id;
+ struct intel_dbuf_bw *crtc_bw;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];
+
+ memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *plane_alloc =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *uv_plane_alloc =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+ unsigned int data_rate = crtc_state->data_rate[plane_id];
+ unsigned int dbuf_mask = 0;
+
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
+
+ /*
+ * FIXME: To calculate this more properly we probably
+ * need to split the per-plane data_rate into data_rate_y
+ * and data_rate_uv for multi-planar formats, so that it
+ * is not accounted twice if the planes happen to reside
+ * on different slices.
+ * However, for pre-icl this works anyway because we
+ * have only a single slice, and for icl+ the uv plane
+ * has a non-zero data rate.
+ * So in the worst case these calculations are a bit
+ * pessimistic, which shouldn't pose any significant
+ * problem anyway.
+ */
+ for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
+ crtc_bw->used_bw[slice_id] += data_rate;
+ }
+ }
+
+ if (!old_bw_state)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_dbuf_bw *crtc_bw;
+
+ crtc_bw = &new_bw_state->dbuf_bw[pipe];
+
+ for_each_dbuf_slice(slice_id) {
+ /*
+ * Current experimental observations show that, contrary
+ * to BSpec, we get underruns once the total bandwidth
+ * consumed across all slices exceeds 64 * CDCLK.
+ * As a temporary measure, in order not to keep CDCLK
+ * bumped up all the time, we derive the minimum CDCLK
+ * from that formula for the overall bandwidth consumed
+ * by the slices.
+ */
+ max_bw += crtc_bw->used_bw[slice_id];
+ }
+ }
+
+ new_bw_state->min_cdclk = max_bw / 64;
+
+ if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
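The final division encodes the empirical limit from the comment above: total dbuf bandwidth must stay under 64 * CDCLK. A minimal worked example of that arithmetic, with illustrative numbers only (assuming the driver's data-rate units of kB/s and CDCLK in kHz):

/* Two dbuf slices consuming 25,000,000 and 15,000,000 kB/s: */
int total_bw = 25000000 + 15000000;	/* 40,000,000 kB/s           */
int min_cdclk = total_bw / 64;		/* 625,000 kHz, i.e. 625 MHz */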
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int min_cdclk = 0;
+ enum pipe pipe;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ }
+
+ if (!old_bw_state)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (!cdclk_state)
+ return 0;
+
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ }
+
+ new_bw_state->min_cdclk = min_cdclk;
+
+ if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index bbcaaa73ec1b..46c6eecbd917 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -9,14 +9,20 @@
#include <drm/drm_atomic.h>
#include "intel_display.h"
+#include "intel_display_power.h"
#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_dbuf_bw {
+ int used_bw[I915_MAX_DBUF_SLICES];
+};
+
struct intel_bw_state {
struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
/*
* Contains a bit mask used to determine whether the corresponding
@@ -36,6 +42,8 @@ struct intel_bw_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ int min_cdclk;
};
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
@@ -56,5 +64,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask);
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state);
+int skl_bw_calc_min_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 979a0241fdcb..45f7f33d1144 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,7 +21,10 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/time.h>
+
#include "intel_atomic.h"
+#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
@@ -2094,6 +2097,7 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *bw_state = NULL;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
@@ -2106,6 +2110,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (min_cdclk < 0)
return min_cdclk;
+ bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(bw_state))
+ return PTR_ERR(bw_state);
+
if (cdclk_state->min_cdclk[i] == min_cdclk)
continue;
@@ -2117,9 +2125,15 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
}
min_cdclk = cdclk_state->force_min_cdclk;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ if (!bw_state)
+ continue;
+
+ min_cdclk = max(bw_state->min_cdclk, min_cdclk);
+ }
+
return min_cdclk;
}
@@ -2700,29 +2714,59 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
-static int g4x_hrawclk(struct drm_i915_private *dev_priv)
+static int i9xx_hrawclk(struct drm_i915_private *dev_priv)
{
u32 clkcfg;
- /* hrawclock is 1/4 the FSB frequency */
- clkcfg = intel_de_read(dev_priv, CLKCFG);
- switch (clkcfg & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_400:
- return 100000;
- case CLKCFG_FSB_533:
- return 133333;
- case CLKCFG_FSB_667:
- return 166667;
- case CLKCFG_FSB_800:
- return 200000;
- case CLKCFG_FSB_1067:
- case CLKCFG_FSB_1067_ALT:
- return 266667;
- case CLKCFG_FSB_1333:
- case CLKCFG_FSB_1333_ALT:
- return 333333;
- default:
- return 133333;
+ /*
+ * hrawclock is 1/4 the FSB frequency
+ *
+ * Note that this only reads the state of the FSB
+ * straps, not the actual FSB frequency. Some BIOSen
+ * let you configure each independently. Ideally we'd
+ * read out the actual FSB frequency but sadly we
+ * don't know which registers have that information,
+ * and all the relevant docs have gone to bit heaven :(
+ */
+ clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK;
+
+ if (IS_MOBILE(dev_priv)) {
+ switch (clkcfg) {
+ case CLKCFG_FSB_400:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067:
+ return 266667;
+ case CLKCFG_FSB_1333:
+ return 333333;
+ default:
+ MISSING_CASE(clkcfg);
+ return 133333;
+ }
+ } else {
+ switch (clkcfg) {
+ case CLKCFG_FSB_400_ALT:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067_ALT:
+ return 266667;
+ case CLKCFG_FSB_1333_ALT:
+ return 333333;
+ case CLKCFG_FSB_1600_ALT:
+ return 400000;
+ default:
+ return 133333;
+ }
}
}
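Every case above is FSB/4 expressed in kHz; for example, a 533 MHz FSB strap yields 533,333 kHz / 4 = 133,333 kHz of hrawclk, which is also the fallback value. A one-line sanity check of the arithmetic:

int hrawclk_khz = 533333 / 4;	/* == 133333, the CLKCFG_FSB_533 return */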
@@ -2743,8 +2787,8 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
freq = vlv_hrawclk(dev_priv);
- else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
- freq = g4x_hrawclk(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 3)
+ freq = i9xx_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
return 0;
@@ -2760,25 +2804,30 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) >= 12) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
@@ -2787,18 +2836,23 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else
dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
} else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = chv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 98ece9cd7cdd..945bb03bdd4d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -714,16 +714,16 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
-static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
+static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -731,15 +731,13 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0),
1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1),
1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2),
1 << 16);
}
-
- intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@@ -753,7 +751,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -761,7 +759,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
ivb_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
}
@@ -776,7 +774,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -784,7 +782,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
bdw_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
}
@@ -877,7 +875,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
}
@@ -900,14 +898,12 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
- intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
- intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
- intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
- intel_dsb_put(dsb);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
}
static void
@@ -916,7 +912,6 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
int i;
@@ -927,19 +922,17 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
-
- intel_dsb_put(dsb);
}
static void
@@ -949,7 +942,6 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
int i;
@@ -963,12 +955,13 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* seg2[0] being unused by the hardware.
*/
- intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
@@ -986,24 +979,22 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
- ivb_load_lut_ext_max(crtc);
- intel_dsb_put(dsb);
+ ivb_load_lut_ext_max(crtc_state);
}
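The loops in this function and in icl_program_gamma_superfine_segment() sample the 8 * 128 * 256-entry uapi gamma LUT at three densities. A recap of the index arithmetic, derived directly from the loop bounds above:

/* superfine: lut[0] .. lut[8]       (9 entries,   step 1)
 * fine:      lut[8] .. lut[2048]    (256 entries, step 8,    lut[i * 8])
 * coarse:    lut[0] .. lut[261120]  (256 entries, step 1024, lut[i * 8 * 128])
 * final:     lut[262144], i.e. lut[256 * 8 * 128], programmed into GCMAX
 */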
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
@@ -1018,11 +1009,10 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
- intel_dsb_commit(dsb);
- intel_dsb_put(dsb);
+ intel_dsb_commit(crtc_state);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 9ff05ec12115..77b04bb3ec62 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -181,11 +181,25 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, CHICKEN_MISC_2, val);
}
+static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
+{
+ /*
+ * Some platforms only expect PHY_MISC to be programmed for PHY-A and
+ * PHY-B and may not even have instances of the register for the
+ * other combo PHYs.
+ */
+ if (IS_ELKHARTLAKE(i915) ||
+ IS_ROCKETLAKE(i915))
+ return phy < PHY_C;
+
+ return true;
+}
+
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
enum phy phy)
{
/* The PHY C added by EHL has no PHY_MISC register */
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ if (!has_phy_misc(dev_priv, phy))
return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
@@ -220,6 +234,27 @@ static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
return false;
}
+static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ /*
+ * Certain PHYs are connected to compensation resistors and act
+ * as masters to other PHYs.
+ *
+ * ICL,TGL:
+ * A(master) -> B(slave), C(slave)
+ * RKL:
+ * A(master) -> B(slave)
+ * C(master) -> D(slave)
+ *
+ * We must set the IREFGEN bit for any PHY acting as a master
+ * to another PHY.
+ */
+ if (IS_ROCKETLAKE(dev_priv) && phy == PHY_C)
+ return true;
+
+ return phy == PHY_A;
+}
+
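In table form, the helper resolves as follows (taken from the topology in its comment); any PHY for which it returns true gets IREFGEN set in ICL_PORT_COMP_DW8 during init and checked during verify:

/* phy_is_master():
 *   ICL/TGL: PHY_A -> true,  PHY_B, PHY_C -> false
 *   RKL:     PHY_A -> true,  PHY_B -> false,
 *            PHY_C -> true,  PHY_D -> false
 */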
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
@@ -231,7 +266,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A) {
+ if (phy_is_master(dev_priv, phy)) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
@@ -317,12 +352,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
continue;
}
- /*
- * Although EHL adds a combo PHY C, there's no PHY_MISC
- * register for it and no need to program the
- * DE_IO_COMP_PWR_DOWN setting on PHY C.
- */
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
/*
@@ -347,7 +377,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
skip_phy_misc:
cnl_set_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A) {
+ if (phy_is_master(dev_priv, phy)) {
val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
@@ -376,12 +406,7 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
- /*
- * Although EHL adds a combo PHY C, there's no PHY_MISC
- * register for it and no need to program the
- * DE_IO_COMP_PWR_DOWN setting on PHY C.
- */
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 2f5b9a4baafd..5b4510ce5693 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -833,7 +833,7 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
- if (i915_modparams.load_detect_test) {
+ if (dev_priv->params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
@@ -889,7 +889,7 @@ load_detect:
else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (i915_modparams.load_detect_test)
+ else if (dev_priv->params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 3112572cfb7d..f22a7645c249 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,6 +40,10 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin"
+#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
+MODULE_FIRMWARE(RKL_CSR_PATH);
+
#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
@@ -682,7 +686,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_csr_runtime_pm_get(dev_priv);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ csr->fw_path = RKL_CSR_PATH;
+ csr->required_version = RKL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (INTEL_GEN(dev_priv) >= 12) {
csr->fw_path = TGL_CSR_PATH;
csr->required_version = TGL_CSR_VERSION_REQUIRED;
/* Allow to load fw via parameter using the last known size */
@@ -699,7 +707,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->fw_path = GLK_CSR_PATH;
csr->required_version = GLK_CSR_VERSION_REQUIRED;
csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ } else if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
csr->fw_path = KBL_CSR_PATH;
csr->required_version = KBL_CSR_VERSION_REQUIRED;
csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
@@ -713,15 +723,15 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
}
- if (i915_modparams.dmc_firmware_path) {
- if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+ if (dev_priv->params.dmc_firmware_path) {
+ if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
drm_info(&dev_priv->drm,
"Disabling CSR firmware and runtime PM\n");
return;
}
- csr->fw_path = i915_modparams.dmc_firmware_path;
+ csr->fw_path = dev_priv->params.dmc_firmware_path;
/* Bypass version check for firmware override. */
csr->required_version = 0;
}
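The override block gives i915.dmc_firmware_path two distinct behaviours; summarizing the code above:

/* dmc_firmware_path=""      -> fw_path = NULL: CSR firmware (and the
 *                              runtime PM it enables) stays disabled
 * dmc_firmware_path="x.bin" -> load x.bin and bypass the version
 *                              check (required_version = 0)
 */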
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0575a1eea2a1..884b507c5f55 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -639,11 +639,25 @@ struct tgl_dkl_phy_ddi_buf_trans {
static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
{ 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
{ 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
{ 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
{ 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
{ 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
@@ -722,10 +736,14 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
- } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
+ } else if (IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
return kbl_u_ddi_translations_dp;
} else {
@@ -738,12 +756,16 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
- } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv)) {
+ } else if (IS_SKL_ULT(dev_priv) ||
+ IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
return skl_u_ddi_translations_edp;
} else {
@@ -752,7 +774,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
return kbl_get_buf_trans_dp(dev_priv, n_entries);
else
return skl_get_buf_trans_dp(dev_priv, n_entries);
@@ -761,8 +785,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -784,7 +810,9 @@ static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
enum port port, int *n_entries)
{
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
kbl_get_buf_trans_dp(dev_priv, n_entries);
*n_entries = skl_buf_trans_num_entries(port, *n_entries);
@@ -1014,6 +1042,22 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return tgl_combo_phy_ddi_translations_dp_hbr;
}
+static const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ return tgl_dkl_phy_hdmi_ddi_trans;
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
+ return tgl_dkl_phy_dp_ddi_trans_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ return tgl_dkl_phy_dp_ddi_trans;
+}
+
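The rate cutoff selects between the two DKL tables defined earlier; in DP link-rate terms (kHz), summarizing the branches above:

/* type == HDMI                      -> tgl_dkl_phy_hdmi_ddi_trans
 * rate <= 270000 (RBR/HBR)          -> tgl_dkl_phy_dp_ddi_trans
 * rate >  270000 (e.g. HBR2 540000) -> tgl_dkl_phy_dp_ddi_trans_hbr2
 */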
static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1025,7 +1069,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ &n_entries);
default_entry = n_entries - 1;
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
@@ -1608,7 +1653,6 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 ctl;
if (INTEL_GEN(dev_priv) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
@@ -1626,10 +1670,9 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
}
- ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ intel_ddi_transcoder_func_reg_val_get(encoder,
+ crtc_state));
}
/*
@@ -2095,10 +2138,10 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
ddi_translations[level].deemphasis);
}
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
+static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
@@ -2108,7 +2151,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
tgl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
- n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ tgl_get_dkl_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
} else if (INTEL_GEN(dev_priv) == 11) {
if (IS_ELKHARTLAKE(dev_priv))
ehl_get_combo_buf_trans(dev_priv, encoder->type,
@@ -2151,19 +2195,9 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
* used on all DDI platforms. Should that change we need to
* rethink this code.
*/
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
@@ -2585,15 +2619,17 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+ int rate = 0;
if (type == INTEL_OUTPUT_HDMI) {
- n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
- ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
- } else {
- n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
- ddi_translations = tgl_dkl_phy_dp_ddi_trans;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ rate = intel_dp->link_rate;
}
+ ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate,
+ &n_entries);
+
if (level >= n_entries)
level = n_entries - 1;
@@ -4155,11 +4191,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
- intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
- }
-
intel_dsc_get_config(encoder, pipe_config);
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4261,6 +4292,16 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder transcoder =
+ intel_dp_mst_is_slave_trans(pipe_config) ?
+ pipe_config->mst_master_transcoder :
+ pipe_config->cpu_transcoder;
+
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
+ }
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4523,6 +4564,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
else
intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+ intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
+ intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
+
if (INTEL_GEN(dev_priv) < 12) {
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index fbdf8ddde486..077e9dbbe367 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -42,9 +42,6 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
- u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c1836095ea38..a11bb675f9b3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -4812,11 +4812,18 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ switch (plane_state->hw.color_encoding) {
+ case DRM_COLOR_YCBCR_BT709:
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
- else
- plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
-
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
+ break;
+ default:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
+ }
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
@@ -4879,7 +4886,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!i915_modparams.force_reset_modeset_test &&
+ if (!dev_priv->params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@@ -6424,8 +6431,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* We can't read out IPS on broadwell, assume the worst and
* forcibly enable IPS on the first fastset.
*/
- if (new_crtc_state->update_pipe &&
- old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ if (new_crtc_state->update_pipe && old_crtc_state->inherited)
return true;
return !old_crtc_state->ips_enabled;
@@ -7212,30 +7218,33 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
-
- if (IS_ELKHARTLAKE(dev_priv))
+ else if (IS_ROCKETLAKE(dev_priv))
+ return phy <= PHY_D;
+ else if (IS_ELKHARTLAKE(dev_priv))
return phy <= PHY_C;
-
- if (INTEL_GEN(dev_priv) >= 11)
+ else if (INTEL_GEN(dev_priv) >= 11)
return phy <= PHY_B;
-
- return false;
+ else
+ return false;
}
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (INTEL_GEN(dev_priv) >= 12)
+ if (IS_ROCKETLAKE(dev_priv))
+ return false;
+ else if (INTEL_GEN(dev_priv) >= 12)
return phy >= PHY_D && phy <= PHY_I;
-
- if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
+ else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
-
- return false;
+ else
+ return false;
}
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
- if (IS_ELKHARTLAKE(i915) && port == PORT_D)
+ if (IS_ROCKETLAKE(i915) && port >= PORT_D)
+ return (enum phy)port - 1;
+ else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
return PHY_A;
return (enum phy)port;
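The RKL branch relies on the port and phy enums being contiguous: subtracting one skips the slot that RKL's missing PORT_C would have occupied. A quick check of the cast arithmetic, assuming the usual PORT_A == 0 ... PORT_E == 4 and PHY_A == 0 ... layout:

/* RKL: PORT_D (3) -> (enum phy)(3 - 1) == PHY_C
 *      PORT_E (4) -> (enum phy)(4 - 1) == PHY_D
 */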
@@ -7579,6 +7588,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_bw_state(dev_priv->bw_obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(dev_priv->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
@@ -7652,6 +7663,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
}
@@ -7869,7 +7882,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!i915_modparams.enable_ips)
+ if (!dev_priv->params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@@ -8140,8 +8153,8 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915_modparams.panel_use_ssc >= 0)
- return i915_modparams.panel_use_ssc != 0;
+ if (dev_priv->params.panel_use_ssc >= 0)
+ return dev_priv->params.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -10882,7 +10895,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- unsigned long panel_transcoder_mask = 0;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
intel_wakeref_t wf;
@@ -10892,9 +10905,6 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
- panel_transcoder_mask |= BIT(TRANSCODER_EDP);
-
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@@ -10905,9 +10915,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_set_bit(panel_transcoder,
- &panel_transcoder_mask,
- ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
+ for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
+ panel_transcoder_mask) {
bool force_thru = false;
enum pipe trans_pipe;
@@ -12499,7 +12508,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
- if (!icl_is_nv12_y_plane(linked->id))
+ if (!icl_is_nv12_y_plane(dev_priv, linked->id))
continue;
if (crtc_state->active_planes & BIT(linked->id))
@@ -12545,6 +12554,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
else if (linked->id == PLANE_SPRITE4)
plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ else if (linked->id == PLANE_SPRITE3)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
+ else if (linked->id == PLANE_SPRITE2)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
else
MISSING_CASE(linked->id);
}
@@ -13570,8 +13583,8 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
static bool fastboot_enabled(struct drm_i915_private *dev_priv)
{
- if (i915_modparams.fastboot != -1)
- return i915_modparams.fastboot;
+ if (dev_priv->params.fastboot != -1)
+ return dev_priv->params.fastboot;
/* Enable fastboot by default on Skylake and newer */
if (INTEL_GEN(dev_priv) >= 9)
@@ -13595,8 +13608,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool ret = true;
u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
- (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
- !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
+ current_config->inherited && !pipe_config->inherited;
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
@@ -14007,10 +14019,10 @@ static void verify_wm_state(struct intel_crtc *crtc,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+ hw_enabled_slices != dev_priv->dbuf.enabled_slices)
drm_err(&dev_priv->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->enabled_dbuf_slices_mask,
+ dev_priv->dbuf.enabled_slices,
hw_enabled_slices);
/* planes */
@@ -14404,6 +14416,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+ crtc->mode_flags = crtc_state->mode_flags;
+
/*
* The scanline counter increments at the leading edge of hsync.
*
@@ -14551,20 +14565,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
state->modeset = true;
state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
- state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
-
- if (state->active_pipe_changes) {
+ if (state->active_pipes != dev_priv->active_pipes) {
ret = _intel_atomic_lock_global_state(state);
if (ret)
return ret;
}
- ret = intel_modeset_calc_cdclk(state);
- if (ret)
- return ret;
-
- intel_modeset_clear_plls(state);
-
if (IS_HASWELL(dev_priv))
return hsw_mode_set_planes_workaround(state);
@@ -14641,11 +14647,10 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
/* See {hsw,vlv,ivb}_plane_ratio() */
return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_IVYBRIDGE(dev_priv);
+ IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
}
-static int intel_atomic_check_planes(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
+static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -14687,7 +14692,13 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ /*
+ * Not just the number of planes: a change in which planes are active
+ * can already require recomputing the min CDCLK, because different
+ * planes consume different amounts of DBuf bandwidth according to the
+ * formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
+ */
+ if (old_active_planes == new_active_planes)
continue;
ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
@@ -14695,6 +14706,21 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
return ret;
}
+ return 0;
+}
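
The comment in intel_atomic_check_planes() above gives the per-plane bandwidth relation. As a rough illustration only (not part of the patch; the helper name and units are hypothetical), it could be evaluated like so:

	/* Illustrative sketch: per-plane DBuf bandwidth from the formula above. */
	static unsigned int plane_dbuf_bw_kbps(unsigned int pixel_rate_khz,
					       unsigned int bpp,       /* bits per pixel */
					       unsigned int scale_num,
					       unsigned int scale_den) /* pipe/plane scale */
	{
		/* Bw = pixel rate * bytes per pixel * scale factor */
		return pixel_rate_khz * bpp / 8 * scale_num / scale_den;
	}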
+
+static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_cdclk_state *new_cdclk_state;
+ struct intel_plane_state *plane_state;
+ struct intel_bw_state *new_bw_state;
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+ enum pipe pipe;
+ int ret;
+ int i;
/*
* active_planes bitmask has been updated, and potentially
* affected planes are part of the state. We can now
@@ -14706,6 +14732,30 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
return ret;
}
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+ *need_cdclk_calc = true;
+
+ ret = dev_priv->display.bw_calc_min_cdclk(state);
+ if (ret)
+ return ret;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (!new_cdclk_state || !new_bw_state)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
+
+ /*
+ * Currently flag a recomputation only if CDCLK needs to increase
+ */
+ if (new_bw_state->min_cdclk > min_cdclk)
+ *need_cdclk_calc = true;
+ }
+
return 0;
}
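
Worked example of the check above: with per-pipe min_cdclk values of 300000 and 450000 kHz, the running maximum ends at 450000; if the bandwidth state reports min_cdclk = 500000 kHz, the comparison trips and *need_cdclk_calc is set, so intel_modeset_calc_cdclk() runs later in intel_atomic_check().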
@@ -14757,16 +14807,13 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_cdclk_state *new_cdclk_state;
struct intel_crtc *crtc;
int ret, i;
bool any_ms = false;
- /* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->uapi.mode.private_flags !=
- old_crtc_state->uapi.mode.private_flags)
+ if (new_crtc_state->inherited != old_crtc_state->inherited)
new_crtc_state->uapi.mode_changed = true;
}
@@ -14868,14 +14915,10 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_atomic_check_planes(state, &any_ms);
+ ret = intel_atomic_check_planes(state);
if (ret)
goto fail;
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
- if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
- any_ms = true;
-
/*
* distrust_bios_wm will force a full dbuf recomputation
* but the hardware state will only get updated accordingly
@@ -14896,10 +14939,6 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = intel_atomic_check_crtcs(state);
- if (ret)
- goto fail;
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -14909,6 +14948,22 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ ret = intel_atomic_check_cdclk(state, &any_ms);
+ if (ret)
+ goto fail;
+
+ if (any_ms) {
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+
+ intel_modeset_clear_plls(state);
+ }
+
+ ret = intel_atomic_check_crtcs(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state) &&
@@ -14939,8 +14994,24 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
- return drm_atomic_helper_prepare_planes(state->base.dev,
- &state->base);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
+ if (ret < 0)
+ return ret;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ bool mode_changed = needs_modeset(crtc_state);
+
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->uapi.color_mgmt_changed) {
+ intel_dsb_prepare(crtc_state);
+ }
+ }
+
+ return 0;
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -15112,7 +15183,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -15204,29 +15275,6 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
}
}
-static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
- u8 required_slices = state->enabled_dbuf_slices_mask;
- u8 slices_union = hw_enabled_slices | required_slices;
-
- /* If 2nd DBuf slice required, enable it here */
- if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, slices_union);
-}
-
-static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
- u8 required_slices = state->enabled_dbuf_slices_mask;
-
- /* If 2nd DBuf slice is no more required disable it */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
-}
-
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -15393,15 +15441,27 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
&wait_reset);
}
+static void intel_cleanup_dsbs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ intel_dsb_cleanup(old_crtc_state);
+}
+
static void intel_atomic_cleanup_work(struct work_struct *work)
{
- struct drm_atomic_state *state =
- container_of(work, struct drm_atomic_state, commit_work);
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
- drm_atomic_helper_cleanup_planes(&i915->drm, state);
- drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_put(state);
+ intel_cleanup_dsbs(state);
+ drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ drm_atomic_helper_commit_cleanup_done(&state->base);
+ drm_atomic_state_put(&state->base);
intel_atomic_helper_free_state(i915);
}
@@ -15467,9 +15527,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_encoders_update_prepare(state);
- /* Enable all new slices, we might need */
- if (state->modeset)
- icl_dbuf_slice_pre_update(state);
+ intel_dbuf_pre_plane_update(state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.commit_modeset_enables(state);
@@ -15524,9 +15582,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
dev_priv->display.optimize_watermarks(state, crtc);
}
- /* Disable all slices, we don't need */
- if (state->modeset)
- icl_dbuf_slice_post_update(state);
+ intel_dbuf_post_plane_update(state);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -15535,6 +15591,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
modeset_put_power_domains(dev_priv, put_domains[i]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
+
+ /*
+ * DSB cleanup is deferred to cleanup_work so that it lines up with
+ * framebuffer cleanup. Move the dsb pointer from the new state to
+ * the old one, keeping it in sync with commit_done, so that
+ * cleanup_work can release the DSB later.
+ */
+ old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
}
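
For reference, fetch_and_zero() used above is the existing i915 helper from i915_utils.h; roughly:

	#define fetch_and_zero(ptr) ({				\
		typeof(*ptr) __T = *(ptr);			\
		*(ptr) = (typeof(*ptr))0;			\
		__T;						\
	})

It reads the old value and leaves a zeroed value behind, so new_crtc_state->dsb ends up NULL while old_crtc_state->dsb takes ownership of the DSB.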
/* Underruns don't always raise interrupts, so check manually */
@@ -15684,8 +15747,15 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_swap_global_state(state);
if (ret) {
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
i915_sw_fence_commit(&state->commit_ready);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_dsb_cleanup(new_crtc_state);
+
drm_atomic_helper_cleanup_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -16745,7 +16815,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */
+ intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */
+ } else if (INTEL_GEN(dev_priv) >= 12) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_D);
@@ -17420,23 +17495,35 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+
+ dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
struct drm_plane *plane;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
- drm_for_each_crtc(crtc, state->dev) {
- struct drm_crtc_state *crtc_state;
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
}
drm_for_each_plane(plane, state->dev) {
@@ -17578,6 +17665,15 @@ retry:
}
if (crtc_state->hw.active) {
+ /*
+ * We've not yet detected sink capabilities
+ * (audio, infoframes, etc.) and thus we don't want to
+ * force a full state recomputation yet. We want that to
+ * happen only for the first real commit from userspace.
+ * So preserve the inherited flag for the time being.
+ */
+ crtc_state->inherited = true;
+
ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@@ -17665,7 +17761,8 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
if (IS_I845G(i915) || IS_I865G(i915)) {
mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
mode_config->cursor_height = 1023;
- } else if (IS_GEN(i915, 2)) {
+ } else if (IS_I830(i915) || IS_I85X(i915) ||
+ IS_I915G(i915) || IS_I915GM(i915)) {
mode_config->cursor_width = 64;
mode_config->cursor_height = 64;
} else {
@@ -17711,6 +17808,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
return ret;
+ ret = intel_dbuf_init(i915);
+ if (ret)
+ return ret;
+
ret = intel_bw_init(i915);
if (ret)
return ret;
@@ -18227,6 +18328,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(dev_priv->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -18257,7 +18360,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
enableddisabled(crtc_state->hw.active));
}
- dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
+ dev_priv->active_pipes = cdclk_state->active_pipes =
+ dbuf_state->active_pipes = active_pipes;
readout_plane_state(dev_priv);
@@ -18348,7 +18452,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
- mode->private_flags = I915_MODE_FLAG_INHERITED;
+ crtc_state->inherited = true;
intel_crtc_compute_pixel_rate(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index efb4da205ea2..b7a6d56bac5f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -187,6 +187,13 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+#define for_each_dbuf_slice_in_mask(__slice, __mask) \
+ for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
+ for_each_if((BIT(__slice)) & (__mask))
+
+#define for_each_dbuf_slice(__slice) \
+ for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1)
+
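
A minimal usage sketch for the new iterators (assuming a dbuf_state with an enabled_slices mask, as introduced elsewhere in this series):

	enum dbuf_slice slice;

	/* Sketch only: log each DBuf slice enabled in a state's mask. */
	for_each_dbuf_slice_in_mask(slice, dbuf_state->enabled_slices)
		drm_dbg_kms(&i915->drm, "DBuf slice %d enabled\n", slice);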
enum port {
PORT_NONE = -1,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 2b640d8ab9d2..d1cb48b3f462 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -125,7 +125,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915_modparams.enable_ips));
+ yesno(dev_priv->params.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@@ -2218,7 +2218,8 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
}
if (INTEL_GEN(dev_priv) >= 10 &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ !to_intel_connector(connector)->mst_port) ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP))
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 49998906cc61..834162bc5a3f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1161,7 +1161,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
drm_WARN(&dev_priv->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
@@ -1943,22 +1943,29 @@ static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
- return !WARN_ON(power_domains->async_put_domains[0] &
- power_domains->async_put_domains[1]);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
+ power_domains->async_put_domains[1]);
}
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
- err |= WARN_ON(!!power_domains->async_put_wakeref !=
- !!__async_put_domains_mask(power_domains));
+ err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
+ !!__async_put_domains_mask(power_domains));
for_each_power_domain(domain, __async_put_domains_mask(power_domains))
- err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+ err |= drm_WARN_ON(&i915->drm,
+ power_domains->domain_use_count[domain] != 1);
return !err;
}
@@ -2200,11 +2207,14 @@ static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
{
- WARN_ON(power_domains->async_put_wakeref);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- WARN_ON(!queue_delayed_work(system_unbound_wq,
- &power_domains->async_put_work,
- msecs_to_jiffies(100)));
+ drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(100)));
}
static void
@@ -2913,6 +2923,53 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
+#define RKL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define RKL_PW_3_POWER_DOMAINS ( \
+ RKL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+/*
+ * There is no PW_2/PG_2 on RKL.
+ *
+ * RKL PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (note: registers are in PW0)
+ * - PIPE_A and its planes and VDSC/joining, except VGA
+ * - transcoder A
+ * - DDI_A and DDI_B
+ * - FBC
+ *
+ * RKL PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - central power except FBC
+ * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ RKL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -4283,6 +4340,140 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
};
+static const struct i915_power_well_desc rkl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_DC_OFF,
+ },
+ {
+ .name = "power well 3",
+ .domains = RKL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_3,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = RKL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ }
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ }
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ }
+ },
+ {
+ .name = "DDI D TC1 IO",
+ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+ },
+ },
+ {
+ .name = "DDI E TC2 IO",
+ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX D TC1",
+ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+ },
+ },
+ {
+ .name = "AUX E TC2",
+ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+ },
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -4322,7 +4513,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
mask = 0;
}
- if (!i915_modparams.disable_power_well)
+ if (!dev_priv->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -4365,6 +4556,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
const struct i915_power_well_desc *power_well_descs,
int power_well_count)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
u64 power_well_ids = 0;
int i;
@@ -4384,8 +4578,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
if (id == DISP_PW_ID_NONE)
continue;
- WARN_ON(id >= sizeof(power_well_ids) * 8);
- WARN_ON(power_well_ids & BIT_ULL(id));
+ drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
@@ -4408,11 +4602,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int err;
- i915_modparams.disable_power_well =
+ dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
- i915_modparams.disable_power_well);
+ dev_priv->params.disable_power_well);
dev_priv->csr.allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+ get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
dev_priv->csr.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@@ -4428,7 +4622,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_GEN(dev_priv, 12)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ err = set_power_wells(power_domains, rkl_power_wells);
+ } else if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
@@ -4491,45 +4687,38 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
-static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool enable)
+static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ enum dbuf_slice slice, bool enable)
{
- u32 val, status;
+ i915_reg_t reg = DBUF_CTL_S(slice);
+ bool state;
+ u32 val;
val = intel_de_read(dev_priv, reg);
- val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+ if (enable)
+ val |= DBUF_POWER_REQUEST;
+ else
+ val &= ~DBUF_POWER_REQUEST;
intel_de_write(dev_priv, reg, val);
intel_de_posting_read(dev_priv, reg);
udelay(10);
- status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
- if ((enable && !status) || (!enable && status)) {
- drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
- enable ? "enable" : "disable");
- return false;
- }
- return true;
+ state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
+ drm_WARN(&dev_priv->drm, enable != state,
+ "DBuf slice %d power %s timeout!\n",
+ slice, enable ? "enable" : "disable");
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices)
{
- icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
-}
-
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
-{
- icl_dbuf_slices_update(dev_priv, 0);
-}
-
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices)
-{
- int i;
- int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
- "Invalid number of dbuf slices requested\n");
+ drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
+ "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
+ req_slices, num_slices);
drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
@@ -4543,36 +4732,36 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
- for (i = 0; i < max_slices; i++) {
- intel_dbuf_slice_set(dev_priv,
- DBUF_CTL_S(i),
- (req_slices & BIT(i)) != 0);
- }
+ for (slice = DBUF_S1; slice < num_slices; slice++)
+ gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
- dev_priv->enabled_dbuf_slices_mask = req_slices;
+ dev_priv->dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
-static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- skl_ddb_get_hw_state(dev_priv);
+ dev_priv->dbuf.enabled_slices =
+ intel_enabled_dbuf_slices_mask(dev_priv);
+
/*
* Just power up at least one slice; we will
* figure out later which slices we have and what we need.
*/
- icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
- BIT(DBUF_S1));
+ gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
+ dev_priv->dbuf.enabled_slices);
}
-static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- icl_dbuf_slices_update(dev_priv, 0);
+ gen9_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- u32 mask, val;
+ unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
+ u32 mask, val, i;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
@@ -4583,11 +4772,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
- intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
- intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
- }
+ /*
+ * gen12 platforms that use abox1 and abox2 for pixel data reads still
+ * expect us to program the abox_ctl0 register as well, even though
+ * we don't have to program other instance-0 registers like BW_BUDDY.
+ */
+ if (IS_GEN(dev_priv, 12))
+ abox_regs |= BIT(0);
+
+ for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
@@ -5066,7 +5260,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
- int i;
+ unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
+ int config, i;
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
@@ -5074,29 +5269,27 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
else
table = tgl_buddy_page_masks;
- for (i = 0; table[i].page_mask != 0; i++)
- if (table[i].num_channels == num_channels &&
- table[i].type == type)
+ for (config = 0; table[config].page_mask != 0; config++)
+ if (table[config].num_channels == num_channels &&
+ table[config].type == type)
break;
- if (table[i].page_mask == 0) {
+ if (table[config].page_mask == 0) {
drm_dbg(&dev_priv->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
- intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
- intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
+ intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ BW_BUDDY_DISABLE);
} else {
- intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
- table[i].page_mask);
- intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
- table[i].page_mask);
-
- /* Wa_22010178259:tgl */
- intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
- BW_BUDDY_TLB_REQ_TIMER_MASK,
- REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
- intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
- BW_BUDDY_TLB_REQ_TIMER_MASK,
- REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+ for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
+ intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ table[config].page_mask);
+
+ /* Wa_22010178259:tgl,rkl */
+ intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ BW_BUDDY_TLB_REQ_TIMER(0x8));
+ }
}
}
@@ -5127,7 +5320,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_cdclk_init_hw(dev_priv);
/* 5. Enable DBUF. */
- icl_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(dev_priv);
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
@@ -5150,7 +5343,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
- icl_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
@@ -5375,7 +5568,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915_modparams.disable_power_well)
+ if (!i915->params.disable_power_well)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(i915);
@@ -5399,7 +5592,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
fetch_and_zero(&i915->power_domains.wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915_modparams.disable_power_well)
+ if (!i915->params.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_display_power_flush_work_sync(i915);
@@ -5488,7 +5681,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915_modparams.disable_power_well)
+ if (!i915->params.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_display_power_flush_work(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 6c917699293b..54c20c76057e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -314,15 +314,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
+ I915_MAX_DBUF_SLICES
};
+void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
+
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices);
-
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 2bf3d4cb4ea9..4b0aaa3081c9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -479,16 +479,6 @@ struct intel_atomic_state {
bool dpll_set, modeset;
- /*
- * Does this transaction change the pipes that are active? This mask
- * tracks which CRTC's have changed their active state at the end of
- * the transaction (not counting the temporary disable during modesets).
- * This mask should only be non-zero when intel_state->modeset is true,
- * but the converse is not necessarily true; simply changing a mode may
- * not flip the final active status of any CRTC's
- */
- u8 active_pipe_changes;
-
u8 active_pipes;
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -506,9 +496,6 @@ struct intel_atomic_state {
*/
bool global_state_changed;
- /* Number of enabled DBuf slices */
- u8 enabled_dbuf_slices_mask;
-
struct i915_sw_fence commit_ready;
struct llist_node freed;
@@ -643,8 +630,7 @@ struct intel_crtc_scaler_state {
int scaler_id;
};
-/* drm_mode->private_flags */
-#define I915_MODE_FLAG_INHERITED (1<<0)
+/* {crtc,crtc_state}->mode_flags */
/* Flag to get scanline using frame time stamps */
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
/* Flag to use the scanline counter instead of the pixel counter */
@@ -841,6 +827,7 @@ struct intel_crtc_state {
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fifo_changed; /* FIFO split is changed */
bool preload_luts;
+ bool inherited; /* state inherited from BIOS? */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -956,6 +943,9 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
+ /* I915_MODE_FLAG_* */
+ u8 mode_flags;
+
u8 lane_count;
/*
@@ -1080,6 +1070,9 @@ struct intel_crtc_state {
/* Only valid on TGL+ */
enum transcoder mst_master_transcoder;
+
+ /* For DSB related info */
+ struct intel_dsb *dsb;
};
enum intel_pipe_crc_source {
@@ -1118,6 +1111,10 @@ struct intel_crtc {
*/
bool active;
u8 plane_ids_mask;
+
+ /* I915_MODE_FLAG_* */
+ u8 mode_flags;
+
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
@@ -1149,9 +1146,6 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
- /* per pipe DSB related info */
- struct intel_dsb dsb;
-
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc;
#endif
@@ -1373,6 +1367,9 @@ struct intel_dp {
void (*set_idle_link_train)(struct intel_dp *intel_dp);
void (*set_signal_levels)(struct intel_dp *intel_dp);
+ u8 (*preemph_max)(struct intel_dp *intel_dp);
+ u8 (*voltage_max)(struct intel_dp *intel_dp);
+
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index cc525fda441a..3df5d901dd9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -409,7 +409,10 @@ static int intel_dp_rate_index(const int *rates, int len, int rate)
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
intel_dp->num_source_rates,
@@ -418,7 +421,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates);
/* Paranoia, there should always be something in common. */
- if (WARN_ON(intel_dp->num_common_rates == 0)) {
+ if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
@@ -465,6 +468,15 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int index;
+ /*
+ * TODO: Enable fallback on MST links once MST link compute can handle
+ * the fallback params.
+ */
+ if (intel_dp->is_mst) {
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
+ return -1;
+ }
+
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@@ -1555,6 +1567,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
@@ -1568,10 +1581,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 2; /* 0 or 1 data bytes */
- if (WARN_ON(txsize > 20))
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
return -E2BIG;
- WARN_ON(!msg->buffer != !msg->size);
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
@@ -1596,7 +1609,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
- if (WARN_ON(rxsize > 20))
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@@ -1871,10 +1884,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int len;
len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
- if (WARN_ON(len <= 0))
+ if (drm_WARN_ON(&i915->drm, len <= 0))
return 162000;
return intel_dp->common_rates[len - 1];
@@ -1882,10 +1896,11 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp)
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i = intel_dp_rate_index(intel_dp->sink_rates,
intel_dp->num_sink_rates, rate);
- if (WARN_ON(i < 0))
+ if (drm_WARN_ON(&i915->drm, i < 0))
i = 0;
return i;
@@ -3984,70 +3999,24 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATU
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-/* These are source-specific values. */
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp)
+static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum port port = encoder->port;
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+}
- if (HAS_DDI(dev_priv))
- return intel_ddi_dp_voltage_max(encoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
+static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum port port = encoder->port;
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+}
- if (HAS_DDI(dev_priv)) {
- return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- }
+static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
@@ -4330,6 +4299,7 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return EDP_LINK_TRAIN_400MV_6DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4746,7 +4716,9 @@ intel_dp_sink_can_mst(struct intel_dp *intel_dp)
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
- return i915_modparams.enable_dp_mst &&
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return i915->params.enable_dp_mst &&
intel_dp->can_mst &&
intel_dp_sink_can_mst(intel_dp);
}
@@ -4763,13 +4735,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
yesno(intel_dp->can_mst), yesno(sink_can_mst),
- yesno(i915_modparams.enable_dp_mst));
+ yesno(i915->params.enable_dp_mst));
if (!intel_dp->can_mst)
return;
intel_dp->is_mst = sink_can_mst &&
- i915_modparams.enable_dp_mst;
+ i915->params.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -5595,35 +5567,46 @@ update_status:
"Could not write test response to sink\n");
}
-static int
+/**
+ * intel_dp_check_mst_status - service any pending MST interrupts, check link status
+ * @intel_dp: Intel DP struct
+ *
+ * Read any pending MST interrupts, call MST core to handle these and ack the
+ * interrupts. Check if the main and AUX link state is ok.
+ *
+ * Returns:
+ * - %true if pending interrupts were serviced (or no interrupts were
+ * pending) w/o detecting an error condition.
+ * - %false if an error condition - like AUX failure or a loss of link - is
+ * detected, which needs servicing from the hotplug work.
+ */
+static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- bool need_retrain = false;
-
- if (!intel_dp->is_mst)
- return -EINVAL;
+ bool link_ok = true;
- WARN_ON_ONCE(intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[DP_DPRX_ESI_LEN] = {};
- bool bret, handled;
+ bool handled;
int retry;
- bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
- if (!bret) {
+ if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
drm_dbg_kms(&i915->drm,
"failed to get ESI - device may have failed\n");
- return -EINVAL;
+ link_ok = false;
+
+ break;
}
/* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links > 0 && !need_retrain &&
+ if (intel_dp->active_mst_links > 0 && link_ok &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
- need_retrain = true;
+ link_ok = false;
}
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
@@ -5643,7 +5626,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
}
}
- return need_retrain;
+ return link_ok;
}
static bool
@@ -5966,7 +5949,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
u8 *dpcd = intel_dp->dpcd;
u8 type;
- if (WARN_ON(intel_dp_is_edp(intel_dp)))
+ if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
if (lspcon->active)
@@ -6191,7 +6174,17 @@ intel_dp_detect(struct drm_connector *connector,
goto out;
}
- if (intel_dp->reset_link_params) {
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
+ intel_dp_configure_mst(intel_dp);
+
+ /*
+ * TODO: Reset link params when switching to MST mode, until MST
+ * supports link training fallback params.
+ */
+ if (intel_dp->reset_link_params || intel_dp->is_mst) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -6203,12 +6196,6 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_print_rates(intel_dp);
- /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (INTEL_GEN(dev_priv) >= 11)
- intel_dp_get_dsc_sink_cap(intel_dp);
-
- intel_dp_configure_mst(intel_dp);
-
if (intel_dp->is_mst) {
/*
* If we are in MST mode then this connector
@@ -7294,35 +7281,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (intel_dp->is_mst) {
- switch (intel_dp_check_mst_status(intel_dp)) {
- case -EINVAL:
- /*
- * If we were in MST mode, and device is not
- * there, get out of MST mode
- */
- drm_dbg_kms(&i915->drm,
- "MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
-
- return IRQ_NONE;
- case 1:
- return IRQ_NONE;
- default:
- break;
- }
- }
-
- if (!intel_dp->is_mst) {
- bool handled;
-
- handled = intel_dp_short_pulse(intel_dp);
-
- if (!handled)
+ if (!intel_dp_check_mst_status(intel_dp))
return IRQ_NONE;
+ } else if (!intel_dp_short_pulse(intel_dp)) {
+ return IRQ_NONE;
}
return IRQ_HANDLED;
@@ -8366,6 +8328,15 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
else
intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
+ (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3;
+ intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+ } else {
+ intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2;
+ intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+ }
+
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 1702959ca079..0a8950f744f6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -92,10 +92,6 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp);
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 0722540d64ad..acbd7eb66cbe 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -348,7 +348,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- if (i915_modparams.enable_dpcd_backlight == 0 ||
+ if (i915->params.enable_dpcd_backlight == 0 ||
!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
@@ -358,7 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
*/
if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
- i915_modparams.enable_dpcd_backlight != 1 &&
+ i915->params.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
drm_info(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index e4f1843170b7..b9e4ee2dbddc 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,6 +34,21 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
+static u8 dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
@@ -44,23 +59,19 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
u8 preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
- u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
-
- if (this_v > v)
- v = this_v;
- if (this_p > p)
- p = this_p;
+ v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
+ p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
}
- voltage_max = intel_dp_voltage_max(intel_dp);
- if (v >= voltage_max)
- v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
-
- preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ preemph_max = intel_dp->preemph_max(intel_dp);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ voltage_max = min(intel_dp->voltage_max(intel_dp),
+ dp_voltage_max(p));
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
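The new dp_voltage_max() helper encodes the DP-spec constraint that a higher pre-emphasis level leaves less headroom for voltage swing (the two levels sum to at most 3), and the rewritten loop clamps the sink's request against both that limit and the platform limit from the new intel_dp->voltage_max() vfunc. A hedged sketch of the clamp, with made-up requested levels:

/* sketch: example sink requests decoded from the DP link status */
u8 v = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
u8 p = DP_TRAIN_PRE_EMPH_LEVEL_2;

/* pre-emphasis level 2 caps voltage swing at level 1 ... */
u8 voltage_max = min(intel_dp->voltage_max(intel_dp), dp_voltage_max(p));

/* ... so the final vswing is clamped and flagged as maxed out */
if (v >= voltage_max)
	v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;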
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 2e6c6375a23b..8273f2e07427 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -317,6 +317,25 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}
+static void clear_act_sent(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ intel_de_write(i915, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT);
+}
+
+static void wait_for_act_sent(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT, 1))
+ drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
+
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+}
+
static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -370,6 +389,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ clear_act_sent(intel_dp);
+
val = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
@@ -377,11 +398,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
val);
- if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for ACT sent when disabling\n");
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ wait_for_act_sent(intel_dp);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -452,7 +469,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
- u32 temp;
bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
@@ -485,8 +501,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
- intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@@ -514,19 +528,25 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+ clear_act_sent(intel_dp);
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+ val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder),
+ val);
+
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);
- if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
-
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ wait_for_act_sent(intel_dp);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
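With the new helpers, enable and post-disable share one ACT handshake: clear any stale DP_TP_STATUS_ACT_SENT (the write in clear_act_sent() implies the status bit is write-to-clear), toggle TRANS_DDI_DP_VC_PAYLOAD_ALLOC, then poll for the bit and let the MST core verify the payload table. A condensed sketch of the enable-side ordering as it now reads:

/* sketch of intel_mst_enable_dp() after this change */
clear_act_sent(intel_dp);			/* drop stale status */
intel_ddi_enable_transcoder_func(encoder, pipe_config);

val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder), val);

wait_for_act_sent(intel_dp);			/* poll + check_act_status() */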
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 29fec6a92d17..24e6d63e2d47 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -34,152 +34,52 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
-static bool is_dsb_busy(struct intel_dsb *dsb)
+static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
+ enum dsb_id id)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
}
-static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
+ enum pipe pipe, enum dsb_id id)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
+ drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
- intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
- intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(i915, DSB_CTRL(pipe, id));
return true;
}
-static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
+ enum pipe pipe, enum dsb_id id)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
+ drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
- intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
- intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(i915, DSB_CTRL(pipe, id));
return true;
}
/**
- * intel_dsb_get() - Allocate DSB context and return a DSB instance.
- * @crtc: intel_crtc structure to get pipe info.
- *
- * This function provides handle of a DSB instance, for the further DSB
- * operations.
- *
- * Returns: address of Intel_dsb instance requested for.
- * Failure: Returns the same DSB instance, but without a command buffer.
- */
-
-struct intel_dsb *
-intel_dsb_get(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
- struct intel_dsb *dsb = &crtc->dsb;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *buf;
- intel_wakeref_t wakeref;
-
- if (!HAS_DSB(i915))
- return dsb;
-
- if (dsb->refcount++ != 0)
- return dsb;
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
- if (IS_ERR(obj)) {
- drm_err(&i915->drm, "Gem object creation failed\n");
- goto out;
- }
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- drm_err(&i915->drm, "Vma creation failed\n");
- i915_gem_object_put(obj);
- goto out;
- }
-
- buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
- if (IS_ERR(buf)) {
- drm_err(&i915->drm, "Command buffer creation failed\n");
- goto out;
- }
-
- dsb->id = DSB1;
- dsb->vma = vma;
- dsb->cmd_buf = buf;
-
-out:
- /*
- * On error dsb->cmd_buf will continue to be NULL, making the writes
- * pass-through. Leave the dangling ref to be removed later by the
- * corresponding intel_dsb_put(): the important error message will
- * already be logged above.
- */
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return dsb;
-}
-
-/**
- * intel_dsb_put() - To destroy DSB context.
- * @dsb: intel_dsb structure.
- *
- * This function destroys the DSB context allocated by a dsb_get(), by
- * unpinning and releasing the VMA object associated with it.
- */
-
-void intel_dsb_put(struct intel_dsb *dsb)
-{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (!HAS_DSB(i915))
- return;
-
- if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
- return;
-
- if (--dsb->refcount == 0) {
- i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
- dsb->cmd_buf = NULL;
- dsb->free_pos = 0;
- dsb->ins_start_offset = 0;
- }
-}
-
-/**
 * intel_dsb_indexed_reg_write() - Write to the DSB context for auto
* increment register.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
* @reg: register address.
* @val: value.
*
@@ -189,19 +89,20 @@ void intel_dsb_put(struct intel_dsb *dsb)
* is done through mmio write.
*/
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
- u32 val)
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct intel_dsb *dsb = crtc_state->dsb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf = dsb->cmd_buf;
+ u32 *buf;
u32 reg_val;
- if (!buf) {
+ if (!dsb) {
intel_de_write(dev_priv, reg, val);
return;
}
-
+ buf = dsb->cmd_buf;
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
@@ -256,7 +157,7 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
/**
 * intel_dsb_reg_write() - Write to the DSB context for normal
* register.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
* @reg: register address.
* @val: value.
*
@@ -265,17 +166,21 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
 * and in all erroneous conditions register programming is done
 * through mmio write.
*/
-void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf = dsb->cmd_buf;
+ struct intel_dsb *dsb;
+ u32 *buf;
- if (!buf) {
+ dsb = crtc_state->dsb;
+ if (!dsb) {
intel_de_write(dev_priv, reg, val);
return;
}
+ buf = dsb->cmd_buf;
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
@@ -290,26 +195,27 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
*
* This function is used to do actual write to hardware using DSB.
 * On errors, fall back to MMIO. This function also helps to reset the context.
*/
-void intel_dsb_commit(struct intel_dsb *dsb)
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct intel_dsb *dsb = crtc_state->dsb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- if (!dsb->free_pos)
+ if (!(dsb && dsb->free_pos))
return;
- if (!intel_dsb_enable_engine(dsb))
+ if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
goto reset;
- if (is_dsb_busy(dsb)) {
+ if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm,
"HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
@@ -322,7 +228,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
memset(&dsb->cmd_buf[dsb->free_pos], 0,
(tail - dsb->free_pos * 4));
- if (is_dsb_busy(dsb)) {
+ if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm,
"TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
@@ -332,7 +238,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
- if (wait_for(!is_dsb_busy(dsb), 1)) {
+ if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
drm_err(&dev_priv->drm,
"Timed out waiting for DSB workload completion.\n");
goto reset;
@@ -341,5 +247,79 @@ void intel_dsb_commit(struct intel_dsb *dsb)
reset:
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- intel_dsb_disable_engine(dsb);
+ intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+}
+
+/**
+ * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
+ * @crtc_state: intel_crtc_state structure to prepare the associated DSB instance for.
+ *
+ * This function prepares the command buffer which is used to store DSB
+ * instructions with data.
+ */
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return;
+
+ dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Gem object creation failed\n");
+ kfree(dsb);
+ goto out;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ drm_err(&i915->drm, "Vma creation failed\n");
+ i915_gem_object_put(obj);
+ kfree(dsb);
+ goto out;
+ }
+
+ buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ drm_err(&i915->drm, "Command buffer creation failed\n");
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ kfree(dsb);
+ goto out;
+ }
+
+ dsb->id = DSB1;
+ dsb->vma = vma;
+ dsb->cmd_buf = buf;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ crtc_state->dsb = dsb;
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_dsb_cleanup() - Clean up the DSB context.
+ * @crtc_state: intel_crtc_state structure whose DSB instance to clean up.
+ *
+ * This function cleans up the DSB context by unpinning and releasing
+ * the VMA object associated with it.
+ */
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsb)
+ return;
+
+ i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
+ kfree(crtc_state->dsb);
+ crtc_state->dsb = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 395ef9ce558e..654a11f24b80 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -10,7 +10,7 @@
#include "i915_reg.h"
-struct intel_crtc;
+struct intel_crtc_state;
struct i915_vma;
enum dsb_id {
@@ -22,7 +22,6 @@ enum dsb_id {
};
struct intel_dsb {
- long refcount;
enum dsb_id id;
u32 *cmd_buf;
struct i915_vma *vma;
@@ -41,12 +40,12 @@ struct intel_dsb {
u32 ins_start_offset;
};
-struct intel_dsb *
-intel_dsb_get(struct intel_crtc *crtc);
-void intel_dsb_put(struct intel_dsb *dsb);
-void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
- u32 val);
-void intel_dsb_commit(struct intel_dsb *dsb);
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val);
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
#endif
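With the refcounted get/put pair gone, the command buffer now lives on the crtc_state and follows the lifetime of an atomic commit. A minimal usage sketch of the reworked API; the palette registers and the lut_entry value are illustrative only:

intel_dsb_prepare(crtc_state);		/* alloc + pin the command buffer */

/* both writers fall back to MMIO when crtc_state->dsb is NULL
 * (no DSB hardware, or prepare failed) */
intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe), 0);
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe), lut_entry);

intel_dsb_commit(crtc_state);		/* kick the engine, or MMIO fallback */
intel_dsb_cleanup(crtc_state);		/* unpin, free, clear the pointer */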
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 1c26673acb2d..30649e17cfb7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -740,7 +740,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
return false;
}
- if (!i915_modparams.enable_fbc) {
+ if (!dev_priv->params.enable_fbc) {
fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@@ -1017,7 +1017,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
fbc->flip_pending = false;
- if (!i915_modparams.enable_fbc) {
+ if (!dev_priv->params.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
__intel_fbc_disable(dev_priv);
@@ -1370,8 +1370,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
- if (i915_modparams.enable_fbc >= 0)
- return !!i915_modparams.enable_fbc;
+ if (dev_priv->params.enable_fbc >= 0)
+ return !!dev_priv->params.enable_fbc;
if (!HAS_FBC(dev_priv))
return 0;
@@ -1415,9 +1415,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
- i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+ dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
- i915_modparams.enable_fbc);
+ dev_priv->params.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 2cbc4619b4ce..815b054bb167 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1923,8 +1923,11 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
- return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
+ return (INTEL_GEN(dev_priv) >= 10 ||
+ IS_GEMINILAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 010f37240710..a31a98d26882 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -3082,6 +3082,24 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
+static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ WARN_ON(port == PORT_C);
+
+ /*
+ * Pin mapping for RKL depends on which PCH is present. With TGP, the
+ * final two outputs use type-c pins, even though they're actually
+ * combo outputs. With CMP, the traditional DDI A-D pins are used for
+ * all outputs.
+ */
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+ return GMBUS_PIN_1_BXT + phy;
+}
+
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -3119,7 +3137,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
- if (HAS_PCH_MCC(dev_priv))
+ if (IS_ROCKETLAKE(dev_priv))
+ ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
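Spelled out, the new RKL mapping gives different GMBUS pins for the last two combo PHYs depending on the PCH; a sketch of the resulting table (numeric pin values per the existing BXT/ICP definitions, shown here as an assumption):

/*
 * RKL DDC pin selection (sketch):
 *
 *   PHY    with TGP PCH                 with CMP PCH
 *   A      GMBUS_PIN_1_BXT (1)          GMBUS_PIN_1_BXT (1)
 *   B      pin 2                        pin 2
 *   C      GMBUS_PIN_9_TC1_ICP (9)      pin 3
 *   D      pin 10                       pin 4
 */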
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 664f88354101..2e94c1413c02 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -89,6 +89,15 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
{
enum phy phy = intel_port_to_phy(dev_priv, port);
+ /*
+ * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
+ * based on the DDI rather than the PHY (i.e., the last two outputs
+ * should be HPD_PORT_{D,E} rather than {C,D}). Note that this differs
+ * from the behavior of both TGL+TGP and RKL+CMP.
+ */
+ if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
+ return HPD_PORT_A + port - PORT_A;
+
switch (phy) {
case PHY_F:
return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 872f2a489339..1888611244db 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -784,8 +784,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
- if (i915_modparams.lvds_channel_mode > 0)
- return i915_modparams.lvds_channel_mode == 2;
+ if (dev_priv->params.lvds_channel_mode > 0)
+ return dev_priv->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index cc6b00959586..de995362f428 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -801,7 +801,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
const struct firmware *fw = NULL;
- const char *name = i915_modparams.vbt_firmware;
+ const char *name = dev_priv->params.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 66711e62fa71..52b4f6193b4c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -100,12 +100,15 @@
#define CLK_RGB24_MASK 0x0
#define CLK_RGB16_MASK 0x070307
#define CLK_RGB15_MASK 0x070707
-#define CLK_RGB8I_MASK 0xffffff
+#define RGB30_TO_COLORKEY(c) \
+ ((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2))
#define RGB16_TO_COLORKEY(c) \
- (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+ ((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3))
#define RGB15_TO_COLORKEY(c) \
- (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+ ((((c) & 0x7c00) << 9) | (((c) & 0x03e0) << 6) | (((c) & 0x001f) << 3))
+#define RGB8I_TO_COLORKEY(c) \
+ ((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))
/* overlay flip addr flag */
#define OFC_UPDATE 0x1
@@ -682,8 +685,8 @@ static void update_colorkey(struct intel_overlay *overlay,
switch (format) {
case DRM_FORMAT_C8:
- key = 0;
- flags |= CLK_RGB8I_MASK;
+ key = RGB8I_TO_COLORKEY(key);
+ flags |= CLK_RGB24_MASK;
break;
case DRM_FORMAT_XRGB1555:
key = RGB15_TO_COLORKEY(key);
@@ -693,6 +696,11 @@ static void update_colorkey(struct intel_overlay *overlay,
key = RGB16_TO_COLORKEY(key);
flags |= CLK_RGB16_MASK;
break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ key = RGB30_TO_COLORKEY(key);
+ flags |= CLK_RGB24_MASK;
+ break;
default:
flags |= CLK_RGB24_MASK;
break;
@@ -777,9 +785,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
- u32 oconfig;
-
- oconfig = OCONF_CC_OUT_8BIT;
+ const struct intel_crtc_state *crtc_state =
+ overlay->crtc->config;
+ u32 oconfig = 0;
+
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ oconfig |= OCONF_CC_OUT_8BIT;
+ if (crtc_state->gamma_enable)
+ oconfig |= OCONF_GAMMA2_ENABLE;
if (IS_GEN(dev_priv, 4))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
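The reworked colorkey macros parenthesize c consistently, and the new RGB30_TO_COLORKEY() simply keeps the top 8 bits of each 10-bit channel. A worked example with 10bpc white in XRGB2101010 layout:

/* sketch: c = 0x3fffffff (X:R:G:B = 2:10:10:10, all channels max) */
/* R: (c & 0x3fc00000) >> 6 = 0x00ff0000 */
/* G: (c & 0x000ff000) >> 4 = 0x0000ff00 */
/* B: (c & 0x000003fc) >> 2 = 0x000000ff */
/* => RGB30_TO_COLORKEY(0x3fffffff) == 0x00ffffff, i.e. 8bpc white */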
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 3c5056dbf607..aaed9eb3b56c 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -521,10 +521,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
- if (i915_modparams.invert_brightness < 0)
+ if (dev_priv->params.invert_brightness < 0)
return val;
- if (i915_modparams.invert_brightness > 0 ||
+ if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val + panel->backlight.min;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b7a2c102648a..86bf7a76f93d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -83,7 +83,7 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
{
switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- return i915_modparams.enable_psr;
+ return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -426,6 +426,12 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0US;
+ if (dev_priv->params.psr_safest_params) {
+ val |= EDP_PSR_TP1_TIME_2500us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+ goto check_tp3_sel;
+ }
+
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
@@ -444,6 +450,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
+check_tp3_sel:
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
@@ -495,18 +502,13 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
-static void hsw_activate_psr2(struct intel_dp *intel_dp)
+static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 val;
-
- val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
-
- val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- val |= EDP_Y_COORDINATE_ENABLE;
+ u32 val = 0;
- val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ if (dev_priv->params.psr_safest_params)
+ return EDP_PSR2_TP2_TIME_2500us;
if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
@@ -518,6 +520,39 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
else
val |= EDP_PSR2_TP2_TIME_2500us;
+ return val;
+}
+
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val;
+
+ val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
+
+ val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ val |= EDP_Y_COORDINATE_ENABLE;
+
+ val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ val |= intel_psr2_get_tp_time(intel_dp);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /*
+ * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
+ * values from BSpec. In order to setting an optimal power
+ * consumption, lower than 4k resoluition mode needs to decrese
+ * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
+ * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
+ */
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
+ val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ val |= EDP_PSR2_IO_BUFFER_WAKE(7);
+ val |= EDP_PSR2_FAST_WAKE(7);
+ }
+
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
@@ -647,6 +682,21 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
/*
+ * Some platforms lack PSR2 HW tracking and instead require manual
+ * tracking by software. In this case, the driver is required to track
+ * the areas that need updates and program hardware to send selective
+ * updates.
+ *
+ * So until the software tracking is implemented, PSR2 needs to be
+ * disabled for platforms without PSR2 HW tracking.
+ */
+ if (!HAS_PSR_HW_TRACKING(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No PSR2 HW tracking in the platform\n");
+ return false;
+ }
+
+ /*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
* over PSR2.
@@ -1450,9 +1500,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
- if (i915_modparams.enable_psr == -1)
+ if (dev_priv->params.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
- i915_modparams.enable_psr = 0;
+ dev_priv->params.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index bc6c26818e15..773523dcd107 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -411,6 +411,7 @@ static const char *sdvo_cmd_name(u8 cmd)
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
const char *cmd_name;
int i, pos = 0;
char buffer[64];
@@ -431,7 +432,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
else
BUF_PRINT("(%02X)", cmd);
- WARN_ON(pos >= sizeof(buffer) - 1);
+ drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
@@ -533,6 +534,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
@@ -597,7 +599,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- WARN_ON(pos >= sizeof(buffer) - 1);
+ drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
@@ -1081,6 +1083,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -1106,7 +1109,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_avi_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(&dev_priv->drm, ret))
return false;
return true;
@@ -1115,6 +1118,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@@ -1123,11 +1127,12 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
return true;
- if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+ if (drm_WARN_ON(&dev_priv->drm,
+ frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
return false;
len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
- if (WARN_ON(len < 0))
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1237,6 +1242,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev);
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
@@ -1257,7 +1263,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
clock->m1 = 12;
clock->m2 = 8;
} else {
- WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
+ drm_WARN(&dev_priv->drm, 1,
+ "SDVO TV clock out of range: %i\n", dotclock);
}
pipe_config->clock_set = true;
@@ -2293,7 +2300,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
return 0;
}
- WARN_ON(1);
+ drm_WARN_ON(connector->dev, 1);
*val = 0;
} else if (property == intel_sdvo_connector->top ||
property == intel_sdvo_connector->bottom)
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 0000ec7055f7..3cd461bf9131 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -333,6 +333,21 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
+{
+ if (IS_ROCKETLAKE(i915))
+ return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
+ else
+ return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
+}
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
+}
+
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
{
return INTEL_GEN(dev_priv) >= 11 &&
@@ -3003,7 +3018,7 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
if (icl_is_hdr_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
return icl_hdr_plane_formats;
- } else if (icl_is_nv12_y_plane(plane_id)) {
+ } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
return icl_sdr_y_plane_formats;
} else {
@@ -3046,6 +3061,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
+ unsigned int supported_csc;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3120,9 +3136,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0,
supported_rotations);
+ supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
+
drm_plane_create_color_properties(&plane->base,
- BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
+ supported_csc,
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 5eeaa92420d1..cd2104ba1ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -32,21 +32,14 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
-static inline bool icl_is_nv12_y_plane(enum plane_id id)
-{
- /* Don't need to do a gen check, these planes are only available on gen11 */
- if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
- return true;
-
- return false;
-}
-
static inline u8 icl_hdr_plane_mask(void)
{
return BIT(PLANE_PRIMARY) |
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id);
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
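icl_is_nv12_y_plane() now needs the device because Rocket Lake has fewer universal planes per pipe, shifting the NV12 Y (luma) planes down to SPRITE2/3. A hedged usage sketch (gen11+ is still required):

/* sketch: which sprites act as NV12 Y planes */
icl_is_nv12_y_plane(i915, PLANE_SPRITE4);	/* true on ICL/TGL, false on RKL */
icl_is_nv12_y_plane(i915, PLANE_SPRITE2);	/* true only on RKL */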
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b161c15baf86..5b5dc86a5737 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -360,12 +360,12 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
}
if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
- !WARN_ON(dig_port->tc_legacy_port))
+ !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
if (dig_port->tc_legacy_port) {
- WARN_ON(max_lanes != 4);
+ drm_WARN_ON(&i915->drm, max_lanes != 4);
dig_port->tc_mode = TC_PORT_LEGACY;
return;
@@ -445,18 +445,20 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 live_status_mask = tc_port_live_status_mask(dig_port);
bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
enum tc_port_mode mode;
- if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
+ if (in_safe_mode ||
+ drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
if (live_status_mask) {
enum tc_port_mode live_mode = fls(live_status_mask) - 1;
- if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
+ if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
mode = live_mode;
}
@@ -505,7 +507,9 @@ static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
{
- WARN_ON(dig_port->tc_link_refcount);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
dig_port->tc_link_refcount = refcount;
}
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index abc67207f2f3..777032d9697b 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1158,7 +1158,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
/* pixel counter doesn't work on i965gm TV output */
if (IS_I965GM(dev_priv))
- adjusted_mode->private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
@@ -1328,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
/* pixel counter doesn't work on i965gm TV output */
if (IS_I965GM(dev_priv))
- adjusted_mode->private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 95ad87d4ccb3..d145fe2bed81 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -476,13 +476,13 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
* POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
*
* - ICL eDP/DSI transcoder
- * - TGL pipe A
+ * - Gen12+ (except RKL) pipe A
*
* For any other pipe, VDSC/joining uses the power well associated with
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
+ if (INTEL_GEN(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc_state))
return POWER_DOMAIN_PIPE(pipe);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f582ab52f0b0..052e0b31a2da 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -298,7 +298,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv)) {
/* Enable Frame time stamp based scanline reporting */
- adjusted_mode->private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
/* Dual link goes to DSI transcoder A. */
@@ -1097,8 +1097,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
/* Enable Frame time stamp based scanline reporting */
- adjusted_mode->private_flags |=
- I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+ pipe_config->mode_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
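The private_flags -> mode_flags conversions here and in intel_tv.c above move the scanline/timestamp quirks off the drm_display_mode and onto the crtc state itself, so they survive mode copies and comparisons. A minimal sketch of the consumer side, assuming a crtc_state in hand:

/* sketch: consumers now test the crtc state, not mode private flags */
if (crtc_state->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER)
	use_scanline_counter();	/* hypothetical helper */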
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 30c229fcb404..5c13809dc3c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -650,7 +650,7 @@ static void context_close(struct i915_gem_context *ctx)
* context close.
*/
if (!i915_gem_context_is_persistent(ctx) ||
- !i915_modparams.enable_hangcheck)
+ !ctx->i915->params.enable_hangcheck)
kill_context(ctx);
i915_gem_context_put(ctx);
@@ -667,7 +667,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
* reset] are allowed to survive past termination. We require
* hangcheck to ensure that the persistent requests are healthy.
*/
- if (!i915_modparams.enable_hangcheck)
+ if (!ctx->i915->params.enable_hangcheck)
return -EINVAL;
i915_gem_context_set_persistence(ctx);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 7db5a793739d..2679380159fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -217,6 +217,7 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .name = "i915_gem_object_dmabuf",
.get_pages = i915_gem_object_get_pages_dmabuf,
.put_pages = i915_gem_object_put_pages_dmabuf,
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index db8eb1c6afe9..c38ab51e82f0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -45,13 +45,6 @@ struct eb_vma_array {
struct eb_vma vma[];
};
-enum {
- FORCE_CPU_RELOC = 1,
- FORCE_GTT_RELOC,
- FORCE_GPU_RELOC,
-#define DBG_FORCE_RELOC 0 /* choose one of the above! */
-};
-
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@@ -260,8 +253,6 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
- unsigned long vaddr; /** Current kmap address */
- unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@@ -605,23 +596,6 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
-static inline int use_cpu_reloc(const struct reloc_cache *cache,
- const struct drm_i915_gem_object *obj)
-{
- if (!i915_gem_object_has_struct_page(obj))
- return false;
-
- if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
- return true;
-
- if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
- return false;
-
- return (cache->has_llc ||
- obj->cache_dirty ||
- obj->cache_level != I915_CACHE_NONE);
-}
-
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@@ -945,8 +919,6 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
- cache->page = -1;
- cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@@ -958,25 +930,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
-static inline void *unmask_page(unsigned long p)
-{
- return (void *)(uintptr_t)(p & PAGE_MASK);
-}
-
-static inline unsigned int unmask_flags(unsigned long p)
-{
- return p & ~PAGE_MASK;
-}
-
-#define KMAP 0x4 /* after CLFLUSH_FLAGS */
-
-static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
-{
- struct drm_i915_private *i915 =
- container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
- return &i915->ggtt;
-}
-
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@@ -1089,181 +1042,6 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
-static void reloc_cache_reset(struct reloc_cache *cache)
-{
- void *vaddr;
-
- if (!cache->vaddr)
- return;
-
- vaddr = unmask_page(cache->vaddr);
- if (cache->vaddr & KMAP) {
- if (cache->vaddr & CLFLUSH_AFTER)
- mb();
-
- kunmap_atomic(vaddr);
- i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
- } else {
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- io_mapping_unmap_atomic((void __iomem *)vaddr);
-
- if (drm_mm_node_allocated(&cache->node)) {
- ggtt->vm.clear_range(&ggtt->vm,
- cache->node.start,
- cache->node.size);
- mutex_lock(&ggtt->vm.mutex);
- drm_mm_remove_node(&cache->node);
- mutex_unlock(&ggtt->vm.mutex);
- } else {
- i915_vma_unpin((struct i915_vma *)cache->node.mm);
- }
- }
-
- cache->vaddr = 0;
- cache->page = -1;
-}
-
-static void *reloc_kmap(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- void *vaddr;
-
- if (cache->vaddr) {
- kunmap_atomic(unmask_page(cache->vaddr));
- } else {
- unsigned int flushes;
- int err;
-
- err = i915_gem_object_prepare_write(obj, &flushes);
- if (err)
- return ERR_PTR(err);
-
- BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
- BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
-
- cache->vaddr = flushes | KMAP;
- cache->node.mm = (void *)obj;
- if (flushes)
- mb();
- }
-
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
- cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
- cache->page = page;
-
- return vaddr;
-}
-
-static void *reloc_iomap(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- unsigned long offset;
- void *vaddr;
-
- if (cache->vaddr) {
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
- } else {
- struct i915_vma *vma;
- int err;
-
- if (i915_gem_object_is_tiled(obj))
- return ERR_PTR(-EINVAL);
-
- if (use_cpu_reloc(cache, obj))
- return NULL;
-
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
- if (err)
- return ERR_PTR(err);
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (IS_ERR(vma)) {
- memset(&cache->node, 0, sizeof(cache->node));
- mutex_lock(&ggtt->vm.mutex);
- err = drm_mm_insert_node_in_range
- (&ggtt->vm.mm, &cache->node,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- mutex_unlock(&ggtt->vm.mutex);
- if (err) /* no inactive aperture space, use cpu reloc */
- return NULL;
- } else {
- cache->node.start = vma->node.start;
- cache->node.mm = (void *)vma;
- }
- }
-
- offset = cache->node.start;
- if (drm_mm_node_allocated(&cache->node)) {
- ggtt->vm.insert_page(&ggtt->vm,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
- } else {
- offset += page << PAGE_SHIFT;
- }
-
- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
- offset);
- cache->page = page;
- cache->vaddr = (unsigned long)vaddr;
-
- return vaddr;
-}
-
-static void *reloc_vaddr(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- void *vaddr;
-
- if (cache->page == page) {
- vaddr = unmask_page(cache->vaddr);
- } else {
- vaddr = NULL;
- if ((cache->vaddr & KMAP) == 0)
- vaddr = reloc_iomap(obj, cache, page);
- if (!vaddr)
- vaddr = reloc_kmap(obj, cache, page);
- }
-
- return vaddr;
-}
-
-static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
-{
- if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
- if (flushes & CLFLUSH_BEFORE) {
- clflushopt(addr);
- mb();
- }
-
- *addr = value;
-
- /*
- * Writes to the same cacheline are serialised by the CPU
- * (including clflush). On the write path, we only require
- * that it hits memory in an orderly fashion and place
- * mb barriers at the start and end of the relocation phase
- * to ensure ordering of clflush wrt to the system.
- */
- if (flushes & CLFLUSH_AFTER)
- clflushopt(addr);
- } else
- *addr = value;
-}
-
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -1429,17 +1207,6 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
-static inline bool use_reloc_gpu(struct i915_vma *vma)
-{
- if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
- return true;
-
- if (DBG_FORCE_RELOC)
- return false;
-
- return !dma_resv_test_signaled_rcu(vma->resv, true);
-}
-
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@@ -1454,10 +1221,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
-static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
+static int __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@@ -1473,7 +1240,7 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
- return false;
+ return PTR_ERR(batch);
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@@ -1522,55 +1289,21 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
- return true;
-}
-
-static bool reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
-{
- if (eb->reloc_cache.vaddr)
- return false;
-
- if (!use_reloc_gpu(vma))
- return false;
-
- return __reloc_entry_gpu(eb, vma, offset, target_addr);
+ return 0;
}
static u64
-relocate_entry(struct i915_vma *vma,
+relocate_entry(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
- struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
- u64 offset = reloc->offset;
-
- if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
- bool wide = eb->reloc_cache.use_64bit_reloc;
- void *vaddr;
-
-repeat:
- vaddr = reloc_vaddr(vma->obj,
- &eb->reloc_cache,
- offset >> PAGE_SHIFT);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
-
- GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
- clflush_write32(vaddr + offset_in_page(offset),
- lower_32_bits(target_addr),
- eb->reloc_cache.vaddr);
-
- if (wide) {
- offset += sizeof(u32);
- target_addr >>= 32;
- wide = false;
- goto repeat;
- }
- }
+ int err;
+
+ err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
+ if (err)
+ return err;
return target->node.start | UPDATE;
}
@@ -1626,8 +1359,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
- if (WARN_ONCE(err,
- "Unexpected failure to bind target VMA!"))
+ if (err)
return err;
}
}
@@ -1636,8 +1368,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -1669,7 +1400,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(ev->vma, reloc, eb, target->vma);
+ return relocate_entry(eb, ev->vma, reloc, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@@ -1707,10 +1438,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
- if (unlikely(copied)) {
- remain = -EFAULT;
- goto out;
- }
+ if (unlikely(copied))
+ return -EFAULT;
remain -= count;
do {
@@ -1718,8 +1447,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
- remain = (int)offset;
- goto out;
+ return (int)offset;
} else {
/*
* Note that reporting an error now
@@ -1749,9 +1477,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
-out:
- reloc_cache_reset(&eb->reloc_cache);
- return remain;
+
+ return 0;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -1911,8 +1638,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
- drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
+ if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
+ drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2659,7 +2386,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
- if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
+ if (!(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
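With the CPU (kmap) and GTT (iomap) relocation paths deleted, every relocation is written by the GPU, and the helpers now report a real errno instead of signalling "fall back" with a bool. A condensed sketch of the new control flow in __reloc_entry_gpu(), using names from the hunks above:

/* sketch: no fallback - a failed GPU relocation fails the execbuf */
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
	return PTR_ERR(batch);	/* was: return false -> CPU/GTT fallback */

/* ... emit MI store(s) of target_addr at vma+offset into the batch ... */
return 0;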
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index cbbff81aa0af..ad22f42541bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -137,6 +137,7 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+ .name = "i915_gem_object_internal",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_internal,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 70543c83df06..932ee21e6609 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = i915_gem_object_get_pages_buddy,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fe45bd4d63a5..fe27c5b344e3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -216,12 +216,12 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case -ENXIO: /* unable to access backing store (on device) */
return VM_FAULT_SIGBUS;
- case -ENOSPC: /* shmemfs allocation failure */
case -ENOMEM: /* our allocation failure */
return VM_FAULT_OOM;
case 0:
case -EAGAIN:
+ case -ENOSPC: /* transient failure to evict? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 99356c00c19e..b6ec5b50d93b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -53,7 +53,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
struct lock_class_key *key)
{
- __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
+ __mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -72,6 +72,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
+
+ if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
+ i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
+ &obj->mm.lock);
}
/**
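Giving each drm_i915_gem_object_ops a .name means every object class gets its own lockdep string for obj->mm.lock, and shrinkable classes are additionally tainted against fs-reclaim via i915_gem_shrinker_taints_mutex(). A hedged sketch of how a new ops table would opt in (all identifiers hypothetical):

static const struct drm_i915_gem_object_ops example_ops = {
	.name = "i915_gem_object_example",	/* lockdep class name */
	.get_pages = example_get_pages,
	.put_pages = example_put_pages,
};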
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index f457d7130491..bfdb32d46877 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -126,6 +126,17 @@ void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
intel_engine_pm_put(ce->engine);
}
+static int
+move_obj_to_gpu(struct drm_i915_gem_object *obj,
+ struct i915_request *rq,
+ bool write)
+{
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+
+ return i915_request_await_object(rq, obj, write);
+}
+
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
@@ -143,12 +154,6 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
return err;
- if (obj->cache_dirty & ~obj->cache_coherent) {
- i915_gem_object_lock(obj);
- i915_gem_clflush_object(obj, 0);
- i915_gem_object_unlock(obj);
- }
-
batch = intel_emit_vma_fill_blt(ce, vma, value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -165,27 +170,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
goto out_request;
- err = i915_request_await_object(rq, obj, true);
- if (unlikely(err))
- goto out_request;
-
- if (ce->engine->emit_init_breadcrumb) {
- err = ce->engine->emit_init_breadcrumb(rq);
- if (unlikely(err))
- goto out_request;
- }
-
i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, true);
+ err = move_obj_to_gpu(vma->obj, rq, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
- err = ce->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
+ if (ce->engine->emit_init_breadcrumb)
+ err = ce->engine->emit_init_breadcrumb(rq);
+
+ if (likely(!err))
+ err = ce->engine->emit_bb_start(rq,
+ batch->node.start,
+ batch->node.size,
+ 0);
out_request:
if (unlikely(err))
i915_request_set_error_once(rq, err);
@@ -317,16 +317,6 @@ out_pm:
return ERR_PTR(err);
}
-static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
-{
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (obj->cache_dirty & ~obj->cache_coherent)
- i915_gem_clflush_object(obj, 0);
-
- return i915_request_await_object(rq, obj, write);
-}
-
int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct intel_context *ce)
@@ -375,7 +365,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
goto out_request;
for (i = 0; i < ARRAY_SIZE(vma); i++) {
- err = move_to_gpu(vma[i], rq, i);
+ err = move_obj_to_gpu(vma[i]->obj, rq, i);
if (unlikely(err))
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 54ee658bb168..b1f82a11aef2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -61,6 +61,8 @@ struct drm_i915_gem_object_ops {
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);
+
+ const char *name; /* friendly name for debug, e.g. lockdep classes */
};
enum i915_mmap_type {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 7fe9831aa9ba..28147aab47b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -27,7 +27,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
void *dst;
int i;
- if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
/*
@@ -140,6 +140,7 @@ static void phys_release(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .name = "i915_gem_object_phys",
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 7aff3514d97a..38113d3c0138 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -147,8 +147,7 @@ rebuild_st:
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
- drm_WARN_ON(&i915->drm,
- (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
@@ -430,6 +429,7 @@ static void shmem_release(struct drm_i915_gem_object *obj)
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
+ .name = "i915_gem_object_shmem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index dc250278bd2c..e0f21f12d3ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -566,6 +566,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .name = "i915_gem_object_stolen",
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
.release = i915_gem_object_release_stolen,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c31a6744daee..9c53eb883400 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -235,7 +235,7 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- if (WARN_ON(obj->userptr.mm == NULL))
+ if (GEM_WARN_ON(!obj->userptr.mm))
return -EINVAL;
mn = i915_mmu_notifier_find(obj->userptr.mm);
@@ -712,6 +712,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+ .name = "i915_gem_object_userptr",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_MMAP |
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 2b46c6530da9..a768ec61e966 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -88,6 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_ops = {
+ .name = "huge-gem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index c9988b6d5c88..8291ede6902c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -139,6 +139,7 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
+ .name = "huge-gem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = get_huge_pages,
@@ -283,12 +284,14 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops fake_ops = {
+ .name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_huge_pages,
.put_pages = fake_put_huge_pages,
};
static const struct drm_i915_gem_object_ops fake_ops_single = {
+ .name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_huge_pages_single,
.put_pages = fake_put_huge_pages,
@@ -1409,147 +1412,6 @@ out:
return err;
}
-static int igt_ppgtt_pin_update(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *dev_priv = ctx->i915;
- unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct drm_i915_gem_object *obj;
- struct i915_gem_engines_iter it;
- struct i915_address_space *vm;
- struct intel_context *ce;
- struct i915_vma *vma;
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- unsigned int n;
- int first, last;
- int err = 0;
-
- /*
- * Make sure there's no funny business when doing a PIN_UPDATE -- in the
- * past we had a subtle issue with being able to incorrectly do multiple
- * alloc va ranges on the same object when doing a PIN_UPDATE, which
- * resulted in some pretty nasty bugs, though only when using
- * huge-gtt-pages.
- */
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- if (!i915_vm_is_4lvl(vm)) {
- pr_info("48b PPGTT not supported, skipping\n");
- goto out_vm;
- }
-
- first = ilog2(I915_GTT_PAGE_SIZE_64K);
- last = ilog2(I915_GTT_PAGE_SIZE_2M);
-
- for_each_set_bit_from(first, &supported, last + 1) {
- unsigned int page_size = BIT(first);
-
- obj = i915_gem_object_create_internal(dev_priv, page_size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_vm;
- }
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, SZ_2M, 0, flags);
- if (err)
- goto out_put;
-
- if (vma->page_sizes.sg < page_size) {
- pr_info("Unable to allocate page-size %x, finishing test early\n",
- page_size);
- goto out_unpin;
- }
-
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_unpin;
-
- if (vma->page_sizes.gtt != page_size) {
- dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
-
- /*
- * The only valid reason for this to ever fail would be
- * if the dma-mapper screwed us over when we did the
- * dma_map_sg(), since it has the final say over the dma
- * address.
- */
- if (IS_ALIGNED(addr, page_size)) {
- pr_err("page_sizes.gtt=%u, expected=%u\n",
- vma->page_sizes.gtt, page_size);
- err = -EINVAL;
- } else {
- pr_info("dma address misaligned, finishing test early\n");
- }
-
- goto out_unpin;
- }
-
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
- if (err)
- goto out_unpin;
-
- i915_vma_unpin(vma);
- i915_gem_object_put(obj);
- }
-
- obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_vm;
- }
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- goto out_put;
-
- /*
- * Make sure we don't end up with something like where the pde is still
- * pointing to the 2M page, and the pt we just filled-in is dangling --
- * we can check this by writing to the first page where it would then
- * land in the now stale 2M page.
- */
-
- n = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- if (!intel_engine_can_store_dword(ce->engine))
- continue;
-
- err = gpu_write(ce, vma, n++, 0xdeadbeaf);
- if (err)
- break;
- }
- i915_gem_context_unlock_engines(ctx);
- if (err)
- goto out_unpin;
-
- while (n--) {
- err = cpu_check(obj, n, 0xdeadbeaf);
- if (err)
- goto out_unpin;
- }
-
-out_unpin:
- i915_vma_unpin(vma);
-out_put:
- i915_gem_object_put(obj);
-out_vm:
- i915_vm_put(vm);
-
- return err;
-}
-
static int igt_tmpfs_fallback(void *arg)
{
struct i915_gem_context *ctx = arg;
@@ -1760,7 +1622,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_shrink_thp),
- SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 8fe3ad2ee34e..299c29e9ad86 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -702,8 +702,5 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- if (!HAS_ENGINE(i915, BCS0))
- return 0;
-
return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index a49016f8ee0d..57c14d3340cd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -37,20 +37,14 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[0] * sizeof(u32),
- 0)) {
- err = -EIO;
+ err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
+ if (err)
goto unpin_vma;
- }
/* !8-Byte aligned */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[1] * sizeof(u32),
- 1)) {
- err = -EIO;
+ err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
+ if (err)
goto unpin_vma;
- }
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@@ -60,12 +54,9 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[2] * sizeof(u32),
- 2)) {
- err = -EIO;
+ err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
+ if (err)
goto unpin_vma;
- }
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 31549ad83fa6..23b6e11bbc3e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -193,7 +193,7 @@ err_src:
}
struct igt_thread_arg {
- struct drm_i915_private *i915;
+ struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
struct file *file;
struct rnd_state prng;
@@ -203,7 +203,7 @@ struct igt_thread_arg {
static int igt_fill_blt_thread(void *arg)
{
struct igt_thread_arg *thread = arg;
- struct drm_i915_private *i915 = thread->i915;
+ struct intel_engine_cs *engine = thread->engine;
struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
@@ -215,7 +215,7 @@ static int igt_fill_blt_thread(void *arg)
ctx = thread->ctx;
if (!ctx) {
- ctx = live_context(i915, thread->file);
+ ctx = live_context_for_engine(engine, thread->file);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -223,7 +223,7 @@ static int igt_fill_blt_thread(void *arg)
ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- ce = i915_gem_context_get_engine(ctx, BCS0);
+ ce = i915_gem_context_get_engine(ctx, 0);
GEM_BUG_ON(IS_ERR(ce));
/*
@@ -256,7 +256,7 @@ static int igt_fill_blt_thread(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- obj = huge_gem_object(i915, phys_sz, sz);
+ obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -321,7 +321,7 @@ err_flush:
static int igt_copy_blt_thread(void *arg)
{
struct igt_thread_arg *thread = arg;
- struct drm_i915_private *i915 = thread->i915;
+ struct intel_engine_cs *engine = thread->engine;
struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *src, *dst;
struct i915_gem_context *ctx;
@@ -333,7 +333,7 @@ static int igt_copy_blt_thread(void *arg)
ctx = thread->ctx;
if (!ctx) {
- ctx = live_context(i915, thread->file);
+ ctx = live_context_for_engine(engine, thread->file);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -341,7 +341,7 @@ static int igt_copy_blt_thread(void *arg)
ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- ce = i915_gem_context_get_engine(ctx, BCS0);
+ ce = i915_gem_context_get_engine(ctx, 0);
GEM_BUG_ON(IS_ERR(ce));
/*
@@ -374,7 +374,7 @@ static int igt_copy_blt_thread(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- src = huge_gem_object(i915, phys_sz, sz);
+ src = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(src)) {
err = PTR_ERR(src);
goto err_flush;
@@ -394,7 +394,7 @@ static int igt_copy_blt_thread(void *arg)
if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
src->cache_dirty = true;
- dst = huge_gem_object(i915, phys_sz, sz);
+ dst = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_put_src;
@@ -456,7 +456,7 @@ err_flush:
return err;
}
-static int igt_threaded_blt(struct drm_i915_private *i915,
+static int igt_threaded_blt(struct intel_engine_cs *engine,
int (*blt_fn)(void *arg),
unsigned int flags)
#define SINGLE_CTX BIT(0)
@@ -477,14 +477,14 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
if (!thread)
goto out_tsk;
- thread[0].file = mock_file(i915);
+ thread[0].file = mock_file(engine->i915);
if (IS_ERR(thread[0].file)) {
err = PTR_ERR(thread[0].file);
goto out_thread;
}
if (flags & SINGLE_CTX) {
- thread[0].ctx = live_context(i915, thread[0].file);
+ thread[0].ctx = live_context_for_engine(engine, thread[0].file);
if (IS_ERR(thread[0].ctx)) {
err = PTR_ERR(thread[0].ctx);
goto out_file;
@@ -492,7 +492,7 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
}
for (i = 0; i < n_cpus; ++i) {
- thread[i].i915 = i915;
+ thread[i].engine = engine;
thread[i].file = thread[0].file;
thread[i].ctx = thread[0].ctx;
thread[i].n_cpus = n_cpus;
@@ -532,24 +532,40 @@ out_tsk:
return err;
}
+static int test_copy_engines(struct drm_i915_private *i915,
+ int (*fn)(void *arg),
+ unsigned int flags)
+{
+ struct intel_engine_cs *engine;
+ int ret;
+
+ for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_COPY, i915) {
+ ret = igt_threaded_blt(engine, fn, flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int igt_fill_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
+ return test_copy_engines(arg, igt_fill_blt_thread, 0);
}
static int igt_fill_blt_ctx0(void *arg)
{
- return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
+ return test_copy_engines(arg, igt_fill_blt_thread, SINGLE_CTX);
}
static int igt_copy_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
+ return test_copy_engines(arg, igt_copy_blt_thread, 0);
}
static int igt_copy_blt_ctx0(void *arg)
{
- return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
+ return test_copy_engines(arg, igt_copy_blt_thread, SINGLE_CTX);
}
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
@@ -564,9 +580,6 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- if (!HAS_ENGINE(i915, BCS0))
- return 0;
-
return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index e7e3c620f542..aa0d06cf1903 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -100,6 +100,43 @@ err_ctx:
}
struct i915_gem_context *
+live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+
+ engines = alloc_engines(1);
+ if (!engines)
+ return ERR_PTR(-ENOMEM);
+
+ ctx = live_context(engine->i915, file);
+ if (IS_ERR(ctx)) {
+ __free_engines(engines, 0);
+ return ctx;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ __free_engines(engines, 0);
+ return ERR_CAST(ce);
+ }
+
+ intel_context_set_gem(ce, ctx);
+ engines->engines[0] = ce;
+ engines->num_engines = 1;
+
+ mutex_lock(&ctx->engines_mutex);
+ i915_gem_context_set_user_engines(ctx);
+ engines = rcu_replace_pointer(ctx->engines, engines, 1);
+ mutex_unlock(&ctx->engines_mutex);
+
+ engines_idle_release(ctx, engines);
+
+ return ctx;
+}
+
+struct i915_gem_context *
kernel_context(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index fb83d2f09212..2a6121d33352 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -9,6 +9,7 @@
struct file;
struct drm_i915_private;
+struct intel_engine_cs;
void mock_init_contexts(struct drm_i915_private *i915);
@@ -21,6 +22,9 @@ void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct file *file);
+struct i915_gem_context *
+live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
+
struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
new file mode 100644
index 000000000000..b491a64919c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gen2_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_ring.h"
+
+int gen2_emit_flush(struct i915_request *rq, u32 mode)
+{
+ unsigned int num_store_dw = 12;
+ u32 cmd, *cs;
+
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE)
+ cmd |= MI_READ_FLUSH;
+
+ cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ }
+ *cs++ = cmd;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
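The intel_ring_begin() budget above is exact rather than padded: one flush command before and one after the loop, plus a four-dword MI_STORE_DWORD_INDEX/MI_FLUSH quartet per iteration. A hypothetical helper (not in the patch) spelling out the arithmetic:

static unsigned int gen2_flush_ring_dwords(unsigned int num_store_dw)
{
	/* bracketing cmd pair + 4 dwords per store/flush quartet */
	return 2 + 4 * num_store_dw;	/* 50 dwords for the default 12 */
}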
+int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+ int i;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_EXE_FLUSH;
+ if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+ cmd |= MI_INVALIDATE_ISP;
+ }
+
+ i = 2;
+ if (mode & EMIT_INVALIDATE)
+ i += 20;
+
+ cs = intel_ring_begin(rq, i);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+
+ /*
+ * A random delay to let the CS invalidate take effect? Without this
+ * delay, the GPU relocation path fails as the CS does not see
+ * the updated contents. Just as important, if we apply the flushes
+ * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+ * write and before the invalidate on the next batch), the relocations
+	 * still fail. This implies that there is a delay following invalidation
+ * that is required to reset the caches as opposed to a delay to
+ * ensure the memory is written.
+ */
+ if (mode & EMIT_INVALIDATE) {
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ for (i = 0; i < 12; i++)
+ *cs++ = MI_FLUSH;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ *cs++ = cmd;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
+ int flush, int post)
+{
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH;
+
+ while (flush--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+ *cs++ = rq->fence.seqno;
+ }
+
+ while (post--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return __gen2_emit_breadcrumb(rq, cs, 16, 8);
+}
+
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return __gen2_emit_breadcrumb(rq, cs, 8, 8);
+}
+
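The flush and post counts passed to __gen2_emit_breadcrumb() translate directly into ring dwords: one MI_FLUSH up front, three dwords per scratch spin and per seqno write, and a trailing MI_USER_INTERRUPT. A hypothetical helper (illustrative only) making the budget explicit:

static unsigned int gen2_breadcrumb_dwords(int flush, int post)
{
	return 1 + 3 * flush + 3 * post + 1; /* gen3(16,8): 74, gen5(8,8): 50 */
}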
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT SZ_256K
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
+int i830_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 *cs, cs_offset =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+
+ GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Evict the invalid PTE TLBs */
+ *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+ *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+ *cs++ = cs_offset;
+ *cs++ = 0xdeadbeef;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ cs = intel_ring_begin(rq, 6 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+		 * Blit the batch (which now has all relocs applied) to the
+ * stable batch scratch bo area (so that the CS never
+ * stumbles over its tlb invalidation bug) ...
+ */
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+ *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+ *cs++ = cs_offset;
+ *cs++ = 4096;
+ *cs++ = offset;
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* ... and execute it. */
+ offset = cs_offset;
+ }
+
+ if (!(dispatch_flags & I915_DISPATCH_SECURE))
+ offset |= MI_BATCH_NON_SECURE;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen3_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 *cs;
+
+ if (!(dispatch_flags & I915_DISPATCH_SECURE))
+ offset |= MI_BATCH_NON_SECURE;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_NON_SECURE_I965;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | security;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+void gen2_irq_enable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+ ENGINE_POSTING_READ16(engine, RING_IMR);
+}
+
+void gen2_irq_disable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+}
+
+void gen3_irq_enable(struct intel_engine_cs *engine)
+{
+ engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
+}
+
+void gen3_irq_disable(struct intel_engine_cs *engine)
+{
+ engine->i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+}
+
+void gen5_irq_enable(struct intel_engine_cs *engine)
+{
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void gen5_irq_disable(struct intel_engine_cs *engine)
+{
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
new file mode 100644
index 000000000000..a5cd64a65c9e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN2_ENGINE_CS_H__
+#define __GEN2_ENGINE_CS_H__
+
+#include <linux/types.h>
+
+struct i915_request;
+struct intel_engine_cs;
+
+int gen2_emit_flush(struct i915_request *rq, u32 mode);
+int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+
+int i830_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int gen3_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int gen4_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+
+void gen2_irq_enable(struct intel_engine_cs *engine);
+void gen2_irq_disable(struct intel_engine_cs *engine);
+void gen3_irq_enable(struct intel_engine_cs *engine);
+void gen3_irq_disable(struct intel_engine_cs *engine);
+void gen5_irq_enable(struct intel_engine_cs *engine);
+void gen5_irq_disable(struct intel_engine_cs *engine);
+
+#endif /* __GEN2_ENGINE_CS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
new file mode 100644
index 000000000000..ce38d1bcaba3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gen6_engine_cs.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_ring.h"
+
+#define HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
+
+/*
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0; /* low dword */
+ *cs++ = 0; /* high dword */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs, flags = 0;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = gen6_emit_post_sync_nonzero_flush(rq);
+ if (ret)
+ return ret;
+
+ /*
+ * Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact. And when rearranging requests, the order of flushes is
+ * unknown.
+ */
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+
+ /* Finally we can flush and with it emit the breadcrumb */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+static int mi_flush_dw(struct i915_request *rq, u32 flags)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ cmd |= flags;
+
+ *cs++ = cmd;
+ *cs++ = HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
+{
+ return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
+}
+
+int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
+}
+
+int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
+}
+
+int gen6_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_NON_SECURE_I965;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = __gen6_emit_bb_start(cs, offset, security);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int
+hsw_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = __gen6_emit_bb_start(cs, offset, security);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen7_stall_cs(struct i915_request *rq)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs, flags = 0;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /*
+ * CS_STALL suggests at least a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /*
+ * Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
+
+ /*
+ * Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set.
+ */
+ gen7_stall_cs(rq);
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+#define GEN7_XCS_WA 32
+u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ for (i = 0; i < GEN7_XCS_WA; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+#undef GEN7_XCS_WA
+
+void gen6_irq_enable(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ ENGINE_POSTING_READ(engine, RING_IMR);
+
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void gen6_irq_disable(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void hsw_irq_enable_vecs(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ ENGINE_POSTING_READ(engine, RING_IMR);
+
+ gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void hsw_irq_disable_vecs(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~0);
+ gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.h b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
new file mode 100644
index 000000000000..76c6bc9f3bde
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_ENGINE_CS_H__
+#define __GEN6_ENGINE_CS_H__
+
+#include <linux/types.h>
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+struct intel_engine_cs;
+
+int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
+u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
+u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+int gen6_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int hsw_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+
+void gen6_irq_enable(struct intel_engine_cs *engine);
+void gen6_irq_disable(struct intel_engine_cs *engine);
+
+void hsw_irq_enable_vecs(struct intel_engine_cs *engine);
+void hsw_irq_disable_vecs(struct intel_engine_cs *engine);
+
+#endif /* __GEN6_ENGINE_CS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 487299cb91f2..27ae48049239 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+ *cs++ = intel_sseu_make_rpcs(rq->engine->i915, &sseu);
intel_ring_advance(rq, cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 9bf6d4989968..a9249a23903a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -187,7 +187,6 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
-#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
@@ -335,7 +334,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
+ ktime_t *now);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 8691eb61e185..7bf2f76212f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -414,12 +414,12 @@ void intel_engines_release(struct intel_gt *gt)
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
- intel_wakeref_wait_for_idle(&engine->wakeref);
- GEM_BUG_ON(intel_engine_pm_is_awake(engine));
-
if (!engine->release)
continue;
+ intel_wakeref_wait_for_idle(&engine->wakeref);
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
engine->release(engine);
engine->release = NULL;
@@ -661,7 +661,6 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
if (!frame)
return -ENOMEM;
- frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
@@ -1095,19 +1094,21 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
- if (__tasklet_is_scheduled(t)) {
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->func(t->data);
- tasklet_unlock(t);
- }
- local_bh_enable();
- }
+ if (!t->func)
+ return;
- /* Otherwise flush the tasklet if it was running on another cpu */
- tasklet_unlock_wait(t);
+ /* Synchronise and wait for the tasklet on another CPU */
+ tasklet_kill(t);
+
+	/* Having cancelled the tasklet, ensure that it is run */
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
}
/**
@@ -1194,8 +1195,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-static int print_sched_attr(struct drm_i915_private *i915,
- const struct i915_sched_attr *attr,
+static int print_sched_attr(const struct i915_sched_attr *attr,
char *buf, int x, int len)
{
if (attr->priority == I915_PRIORITY_INVALID)
@@ -1215,7 +1215,7 @@ static void print_request(struct drm_printer *m,
char buf[80] = "";
int x = 0;
- x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
@@ -1423,9 +1423,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ccid:%08x, ",
+ "\t\tActive[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->active),
- rq->context->lrc.ccid);
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
@@ -1435,9 +1437,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tPending[%d]: ccid:%08x, ",
+ "\t\tPending[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->pending),
- rq->context->lrc.ccid);
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
@@ -1506,6 +1510,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_request *rq;
intel_wakeref_t wakeref;
unsigned long flags;
+ ktime_t dummy;
if (header) {
va_list ap;
@@ -1523,6 +1528,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
yesno(!llist_empty(&engine->barrier_tasks)));
drm_printf(m, "\tLatency: %luus\n",
ewma__engine_latency_read(&engine->latency));
+ if (intel_engine_supports_stats(engine))
+ drm_printf(m, "\tRuntime: %llums\n",
+ ktime_to_ms(intel_engine_get_busy_time(engine,
+ &dummy)));
+ drm_printf(m, "\tForcewake: %x domains, %d active\n",
+ engine->fw_domain, atomic_read(&engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1589,7 +1600,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
+static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
+ ktime_t *now)
{
ktime_t total = engine->stats.total;
@@ -1597,9 +1609,9 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
* If the engine is executing something at the moment
* add it to the total.
*/
+ *now = ktime_get();
if (atomic_read(&engine->stats.active))
- total = ktime_add(total,
- ktime_sub(ktime_get(), engine->stats.start));
+ total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
}
@@ -1607,17 +1619,18 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
+ * @now: monotonic timestamp of sampling
*
* Returns accumulated time @engine was busy since engine stats were enabled.
*/
-ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
unsigned int seq;
ktime_t total;
do {
seq = read_seqbegin(&engine->stats.lock);
- total = __intel_engine_get_busy_time(engine);
+ total = __intel_engine_get_busy_time(engine, now);
} while (read_seqretry(&engine->stats.lock, seq));
return total;
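Because *now is sampled inside the same seqlock-protected read as the busy total, a caller can pair the two values without a second racy ktime_get(). A hedged usage sketch (hypothetical sampler, not part of this patch; assumes div64_u64 from <linux/math64.h>):

static u64 engine_busy_permille(struct intel_engine_cs *engine,
				ktime_t *last_busy, ktime_t *last_now)
{
	ktime_t now;
	ktime_t busy = intel_engine_get_busy_time(engine, &now);
	u64 ratio = 0;

	/* Busy and wall-clock deltas come from one consistent sample */
	if (ktime_after(now, *last_now))
		ratio = div64_u64(1000 * (u64)ktime_sub(busy, *last_busy),
				  (u64)ktime_sub(now, *last_now));

	*last_busy = busy;
	*last_now = now;
	return ratio;
}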
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 5136c8bf112d..8db7e93abde5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -4,6 +4,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_request.h"
#include "intel_context.h"
@@ -48,8 +49,10 @@ static void show_heartbeat(const struct i915_request *rq,
struct drm_printer p = drm_debug_printer("heartbeat");
intel_engine_dump(engine, &p,
- "%s heartbeat {prio:%d} not ticking\n",
+ "%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
rq->sched.attr.priority);
}
@@ -63,6 +66,9 @@ static void heartbeat(struct work_struct *wrk)
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
+ /* Just in case everything has gone horribly wrong, give it a kick */
+ intel_engine_flush_submission(engine);
+
rq = engine->heartbeat.systole;
if (rq && i915_request_completed(rq)) {
i915_request_put(rq);
@@ -76,8 +82,19 @@ static void heartbeat(struct work_struct *wrk)
goto out;
if (engine->heartbeat.systole) {
- if (engine->schedule &&
- rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ if (!i915_sw_fence_signaled(&rq->submit)) {
+ /*
+ * Not yet submitted, system is stalled.
+ *
+ * This more often happens for ring submission,
+ * where all contexts are funnelled into a common
+ * ringbuffer. If one context is blocked on an
+ * external fence, not only is it not submitted,
+ * but all other contexts, including the kernel
+ * context are stuck waiting for the signal.
+ */
+ } else if (engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
/*
* Gradually raise the priority of the heartbeat to
* give high priority work [which presumably desires
@@ -117,7 +134,7 @@ static void heartbeat(struct work_struct *wrk)
goto unlock;
idle_pulse(engine, rq);
- if (i915_modparams.enable_hangcheck)
+ if (engine->i915->params.enable_hangcheck)
engine->heartbeat.systole = i915_request_get(rq);
__i915_request_commit(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 2b6cdf47d428..073c3769e8cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -24,6 +24,7 @@
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
+#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
@@ -313,6 +314,16 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ /*
+ * Some w/a require forcewake to be held (which prevents RC6) while
+	 * a particular engine is active. If so, fw_domain is set to the
+	 * domains that must be held for the duration of request activity,
+	 * or 0 if none. We try to limit the duration of the hold as much
+ * as possible.
+ */
+ enum forcewake_domains fw_domain;
+ atomic_t fw_active;
+
unsigned long context_tag;
struct rb_node uabi_node;
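The fw_domain/fw_active pair is consumed by the execlists schedule-in/out hooks later in this diff; the intended transitions reduce to the following pattern, sketched here as hypothetical helpers (engine_fw_get/put are illustrative names, not part of the patch):

static void engine_fw_get(struct intel_engine_cs *engine)
{
	/* Take the forcewake reference only on the 0 -> 1 transition */
	if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
		intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
}

static void engine_fw_put(struct intel_engine_cs *engine)
{
	/* Drop it on the final 1 -> 0 so overlapping requests share one hold */
	if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
		intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
}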
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 66165b10256e..323c328d444a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -108,13 +108,32 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct i915_vma *vma;
+ struct i915_vma *vma, *vn;
+ int open;
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ __i915_vma_evict(vma);
+ drm_mm_remove_node(&vma->node);
+ }
+ }
+
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
+ atomic_set(&ggtt->vm.open, open);
+
+ mutex_unlock(&ggtt->vm.mutex);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
@@ -424,22 +443,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
+ if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
+ return 0;
+
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
-
return 0;
}
@@ -1166,6 +1180,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
+static unsigned int clear_bind(struct i915_vma *vma)
+{
+ return atomic_fetch_and(~I915_VMA_BIND_MASK, &vma->flags);
+}
+
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
@@ -1183,14 +1202,11 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
+ unsigned int was_bound = clear_bind(vma);
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- continue;
-
- clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
- PIN_GLOBAL, NULL));
+ was_bound, NULL));
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f069551e412f..ebc29b6ee86c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -616,6 +616,11 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
void intel_gt_driver_release(struct intel_gt *gt)
{
struct i915_address_space *vm;
+ intel_wakeref_t wakeref;
+
+ /* Scrub all HW state upon release */
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ __intel_gt_reset(gt, ALL_ENGINES);
vm = fetch_and_zero(&gt->vm);
if (vm) /* FIXME being called twice on error paths :( */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 1495054a4305..418ae184cecf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -212,8 +212,9 @@ void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
- if (cancel_delayed_work_sync(&pool->work))
+ do {
pool_free_imm(pool);
+ } while (cancel_delayed_work_sync(&pool->work));
}
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
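
cancel_delayed_work_sync() returns true iff the work was queued, so the do/while above is a drain loop: free everything accumulated so far, then keep looping for as long as the worker managed to re-arm itself in the meantime. The old form only freed when the cancel reported pending work, leaving buffers behind on a racing requeue. The generic shape of the pattern, sketched as a hypothetical helper:

static void drain_rearming_work(struct delayed_work *dw,
                                void (*free_all)(void *data), void *data)
{
        do {
                free_all(data);         /* release what has built up so far */
        } while (cancel_delayed_work_sync(dw)); /* true => it re-armed, retry */
}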
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 6bdb434a442d..f1d5333f9456 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -214,8 +214,8 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- drm_err(&gt->i915->drm,
- "Failed to initialize GPU, declaring it wedged!\n");
+ i915_probe_error(gt->i915,
+ "Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7c3d8ef4a47c..e866b8d721ed 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -446,6 +446,9 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
* we have to flip the index value to become priority.
*/
p = to_priolist(rb);
+ if (!I915_USER_PRIORITY_SHIFT)
+ return p->priority;
+
return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}
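
queue_prio() packs two values into one integer: the priolist's base priority in the high bits and the lowest set bit of the p->used sub-level bitmask, recovered via ffs(), folded into the low bits. A worked example with an illustrative shift:

/*
 * Assume I915_USER_PRIORITY_SHIFT == 3 (illustrative), giving 8
 * sub-levels per user priority:
 *
 *   p->priority = 0, p->used = 0b0100  =>  ffs(p->used) = 3
 *   ((0 + 1) << 3) - 3 = 5
 *
 * so this priolist contributes effective priority 5 out of [0, 7].
 * When the shift is configured to 0 there are no sub-levels at all,
 * hence the new early return of p->priority.
 */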
@@ -1377,6 +1380,8 @@ __execlists_schedule_in(struct i915_request *rq)
ce->lrc.ccid |= engine->execlists.ccid;
__intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -1409,8 +1414,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct i915_request *next = READ_ONCE(ve->request);
- if (next && next->execution_mask & ~rq->execution_mask)
- tasklet_schedule(&ve->base.execlists.tasklet);
+ if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
static inline void
@@ -1445,6 +1450,8 @@ __execlists_schedule_out(struct i915_request *rq,
intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
intel_gt_pm_put_async(engine->gt);
/*
@@ -1636,9 +1643,9 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
ccid = ce->lrc.ccid;
/*
- * Sentinels are supposed to be lonely so they flush the
- * current exection off the HW. Check that they are the
- * only request in the pending submission.
+ * Sentinels are supposed to be the last request so they flush
+ * the current execution off the HW. Check that they are the only
+ * request in the pending submission.
*/
if (sentinel) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
@@ -1647,15 +1654,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
port - execlists->pending);
return false;
}
-
sentinel = i915_request_has_sentinel(rq);
- if (sentinel && port != execlists->pending) {
- GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
@@ -1967,7 +1966,7 @@ static int
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
if (list_is_last(&rq->sched.link, &engine->active.requests))
- return INT_MIN;
+ return engine->execlists.queue_priority_hint;
return rq_prio(list_next_entry(rq, sched.link));
}
@@ -2445,6 +2444,7 @@ done:
set_preempt_timeout(engine, *active);
execlists_submit_ports(engine);
} else {
+ start_timeslice(engine, execlists->queue_priority_hint);
skip_submit:
ring_set_paused(engine, 0);
}
@@ -3170,13 +3170,6 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
if (reset_in_progress(execlists))
return; /* defer until we restart the engine following reset */
- /* Hopefully we clear execlists->pending[] to let us through */
- if (READ_ONCE(execlists->pending[0]) &&
- tasklet_trylock(&execlists->tasklet)) {
- process_csb(engine);
- tasklet_unlock(&execlists->tasklet);
- }
-
__execlists_submission_tasklet(engine);
}
@@ -3199,11 +3192,25 @@ static bool ancestor_on_hold(const struct intel_engine_cs *engine,
return !list_empty(&engine->active.hold) && hold_request(rq);
}
+static void flush_csb(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+
+ if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
+ if (!reset_in_progress(el))
+ process_csb(engine);
+ tasklet_unlock(&el->tasklet);
+ }
+}
+
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
+ /* Hopefully we clear execlists->pending[] to let us through */
+ flush_csb(engine);
+
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
@@ -3536,7 +3543,7 @@ static int emit_pdps(struct i915_request *rq)
int err, i;
u32 *cs;
- GEM_BUG_ON(intel_vgpu_active(rq->i915));
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
/*
* Beware ye of the dragons, this sequence is magic!
@@ -4515,11 +4522,11 @@ static int gen8_emit_flush_render(struct i915_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN(request->i915, 9))
+ if (IS_GEN(request->engine->i915, 9))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0))
dc_flush_wa = true;
}
@@ -5598,7 +5605,7 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
- tasklet_schedule(&ve->base.execlists.tasklet);
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
spin_unlock_irqrestore(&ve->base.active.lock, flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index f59e7875cc5e..6db23389e427 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -61,7 +61,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
#define OUT_BATCH(batch, i, val) \
do { \
if ((i) >= PAGE_SIZE / sizeof(u32)) \
- goto err; \
+ goto out; \
(batch)[(i)++] = (val); \
} while(0)
@@ -70,15 +70,12 @@ static int render_state_setup(struct intel_renderstate *so,
{
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
- unsigned int needs_clflush;
+ int ret = -EINVAL;
u32 *d;
- int ret;
- ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
- if (ret)
- return ret;
-
- d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
+ d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -89,7 +86,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
- goto err;
+ goto out;
d[i++] = s;
s = upper_32_bits(r);
@@ -103,7 +100,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (rodata->reloc[reloc_index] != -1) {
drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
- goto err;
+ goto out;
}
so->batch_offset = i915_ggtt_offset(so->vma);
@@ -150,19 +147,11 @@ static int render_state_setup(struct intel_renderstate *so,
*/
so->aux_size = ALIGN(so->aux_size, 8);
- if (needs_clflush)
- drm_clflush_virt_range(d, i * sizeof(u32));
- kunmap_atomic(d);
-
ret = 0;
out:
- i915_gem_object_finish_access(so->vma->obj);
+ __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
+ i915_gem_object_unpin_map(so->vma->obj);
return ret;
-
-err:
- kunmap_atomic(d);
- ret = -EINVAL;
- goto out;
}
#undef OUT_BATCH
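
The rewrite trades per-page kmap_atomic() plus conditional clflush for one persistent WB mapping of the batch object and a single ranged flush shared by the success and error paths, which is what lets the separate err: label collapse into out:. Condensed shape of the new pattern, with names taken from the hunk above:

/*
 *     d = i915_gem_object_pin_map(obj, I915_MAP_WB);   map whole object
 *     ... write d[0..i-1], goto out on any error ...
 * out:
 *     __i915_gem_object_flush_map(obj, 0, i * sizeof(u32));
 *     i915_gem_object_unpin_map(obj);
 *
 * One flush of exactly the written range replaces the clflush +
 * kunmap_atomic() pair that every exit path previously needed.
 */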
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 39070b514e65..0156f1f5c736 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -638,7 +638,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
- if (!i915_modparams.reset)
+ if (!gt->i915->params.reset)
return NULL;
return intel_get_gpu_reset(gt);
@@ -646,7 +646,7 @@ bool intel_has_gpu_reset(const struct intel_gt *gt)
bool intel_has_reset_engine(const struct intel_gt *gt)
{
- if (i915_modparams.reset < 2)
+ if (gt->i915->params.reset < 2)
return false;
return INTEL_INFO(gt->i915)->has_reset_engine;
@@ -1038,7 +1038,7 @@ void intel_gt_reset(struct intel_gt *gt,
awake = reset_prepare(gt);
if (!intel_has_gpu_reset(gt)) {
- if (i915_modparams.reset)
+ if (gt->i915->params.reset)
drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ca7286e58409..68a08486fc87 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -27,21 +27,15 @@
*
*/
-#include <linux/log2.h>
-
-#include "gem/i915_gem_context.h"
-
+#include "gen2_engine_cs.h"
+#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
-#include "intel_gt_irq.h"
-#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
-#include "intel_workarounds.h"
#include "shmem_utils.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -49,436 +43,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static int
-gen2_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- unsigned int num_store_dw;
- u32 cmd, *cs;
-
- cmd = MI_FLUSH;
- num_store_dw = 0;
- if (mode & EMIT_INVALIDATE)
- cmd |= MI_READ_FLUSH;
- if (mode & EMIT_FLUSH)
- num_store_dw = 4;
-
- cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = cmd;
- while (num_store_dw--) {
- *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT);
- *cs++ = 0;
- }
- *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen4_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 cmd, *cs;
- int i;
-
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH;
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_EXE_FLUSH;
- if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
- cmd |= MI_INVALIDATE_ISP;
- }
-
- i = 2;
- if (mode & EMIT_INVALIDATE)
- i += 20;
-
- cs = intel_ring_begin(rq, i);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = cmd;
-
- /*
- * A random delay to let the CS invalidate take effect? Without this
- * delay, the GPU relocation path fails as the CS does not see
- * the updated contents. Just as important, if we apply the flushes
- * to the EMIT_FLUSH branch (i.e. immediately after the relocation
- * write and before the invalidate on the next batch), the relocations
- * still fail. This implies that it is a delay following invalidation
- * that is required to reset the caches as opposed to a delay to
- * ensure the memory is written.
- */
- if (mode & EMIT_INVALIDATE) {
- *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT) |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- *cs++ = 0;
-
- for (i = 0; i < 12; i++)
- *cs++ = MI_FLUSH;
-
- *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT) |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- *cs++ = 0;
- }
-
- *cs++ = cmd;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-/*
- * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
- * implementing two workarounds on gen6. From section 1.4.7.1
- * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
- *
- * [DevSNB-C+{W/A}] Before any depth stall flush (including those
- * produced by non-pipelined state commands), software needs to first
- * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
- * 0.
- *
- * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
- * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
- *
- * And the workaround for these two requires this workaround first:
- *
- * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
- * BEFORE the pipe-control with a post-sync op and no write-cache
- * flushes.
- *
- * And this last workaround is tricky because of the requirements on
- * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
- * volume 2 part 1:
- *
- * "1 of the following must also be set:
- * - Render Target Cache Flush Enable ([12] of DW1)
- * - Depth Cache Flush Enable ([0] of DW1)
- * - Stall at Pixel Scoreboard ([1] of DW1)
- * - Depth Stall ([13] of DW1)
- * - Post-Sync Operation ([13] of DW1)
- * - Notify Enable ([8] of DW1)"
- *
- * The cache flushes require the workaround flush that triggered this
- * one, so we can't use it. Depth stall would trigger the same.
- * Post-sync nonzero is what triggered this second workaround, so we
- * can't use that one either. Notify enable is IRQs, which aren't
- * really our business. That leaves only stall at scoreboard.
- */
-static int
-gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
-{
- u32 scratch_addr =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
- u32 *cs;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
- *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0; /* low dword */
- *cs++ = 0; /* high dword */
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen6_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 scratch_addr =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
- u32 *cs, flags = 0;
- int ret;
-
- /* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = gen6_emit_post_sync_nonzero_flush(rq);
- if (ret)
- return ret;
-
- /* Just flush everything. Experiments have shown that reducing the
- * number of bits based on the write domains has little performance
- * impact.
- */
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- /*
- * Ensure that any following seqno writes only happen
- * when the render cache is indeed flushed.
- */
- flags |= PIPE_CONTROL_CS_STALL;
- }
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- /*
- * TLB invalidate requires a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
- }
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = flags;
- *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
- *cs++ = 0;
- *cs++ = 0;
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT) |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
-
- /* Finally we can flush and with it emit the breadcrumb */
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-static int
-gen7_render_ring_cs_stall_wa(struct i915_request *rq)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
- *cs++ = 0;
- *cs++ = 0;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen7_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 scratch_addr =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
- u32 *cs, flags = 0;
-
- /*
- * Ensure that any following seqno writes only happen when the render
- * cache is indeed flushed.
- *
- * Workaround: 4th PIPE_CONTROL command (except the ones with only
- * read-cache invalidate bits set) must have the CS_STALL bit set. We
- * don't try to be clever and just set it unconditionally.
- */
- flags |= PIPE_CONTROL_CS_STALL;
-
- /*
- * CS_STALL suggests at least a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- /* Just flush everything. Experiments have shown that reducing the
- * number of bits based on the write domains has little performance
- * impact.
- */
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
-
- /* Workaround: we must issue a pipe_control with CS-stall bit
- * set before a pipe_control command that has the state cache
- * invalidate bit set. */
- gen7_render_ring_cs_stall_wa(rq);
- }
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = flags;
- *cs++ = scratch_addr;
- *cs++ = 0;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-#define GEN7_XCS_WA 32
-static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- int i;
-
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
- MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->fence.seqno;
-
- for (i = 0; i < GEN7_XCS_WA; i++) {
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
- }
-
- *cs++ = MI_FLUSH_DW;
- *cs++ = 0;
- *cs++ = 0;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-#undef GEN7_XCS_WA
-
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
/*
@@ -865,32 +429,6 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int rcs_resume(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
-
- /*
- * Disable CONSTANT_BUFFER before it is loaded from the context
- * image. For as it is loaded, it is executed and the stored
- * address may no longer be valid, leading to a GPU hang.
- *
- * This imposes the requirement that userspace reload their
- * CONSTANT_BUFFER on every batch, fortunately a requirement
- * they are already accustomed to from before contexts were
- * enabled.
- */
- if (IS_GEN(i915, 4))
- intel_uncore_write(uncore, ECOSKPD,
- _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
-
- if (IS_GEN_RANGE(i915, 6, 7))
- intel_uncore_write(uncore, INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
- return xcs_resume(engine);
-}
-
static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
@@ -918,255 +456,6 @@ static void i9xx_submit_request(struct i915_request *request)
intel_ring_set_tail(request->ring, request->tail));
}
-static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH;
-
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-#define GEN5_WA_STORES 8 /* must be at least 1! */
-static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- int i;
-
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH;
-
- BUILD_BUG_ON(GEN5_WA_STORES < 1);
- for (i = 0; i < GEN5_WA_STORES; i++) {
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
- }
-
- *cs++ = MI_USER_INTERRUPT;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-#undef GEN5_WA_STORES
-
-static void
-gen5_irq_enable(struct intel_engine_cs *engine)
-{
- gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-gen5_irq_disable(struct intel_engine_cs *engine)
-{
- gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-i9xx_irq_enable(struct intel_engine_cs *engine)
-{
- engine->i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
- intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
-}
-
-static void
-i9xx_irq_disable(struct intel_engine_cs *engine)
-{
- engine->i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
-}
-
-static void
-i8xx_irq_enable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
-
- i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
- ENGINE_POSTING_READ16(engine, RING_IMR);
-}
-
-static void
-i8xx_irq_disable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
-
- i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
-}
-
-static int
-bsd_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_FLUSH;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
- return 0;
-}
-
-static void
-gen6_irq_enable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
-
- /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- ENGINE_POSTING_READ(engine, RING_IMR);
-
- gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-gen6_irq_disable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
- gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-hsw_vebox_irq_enable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
-
- /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- ENGINE_POSTING_READ(engine, RING_IMR);
-
- gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-hsw_vebox_irq_disable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~0);
- gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static int
-i965_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 length,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
- I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
- *cs++ = offset;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT SZ_256K
-#define I830_TLB_ENTRIES (2)
-#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
-static int
-i830_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs, cs_offset =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT);
-
- GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Evict the invalid PTE TLBs */
- *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
- *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
- *cs++ = cs_offset;
- *cs++ = 0xdeadbeef;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
- if (len > I830_BATCH_LIMIT)
- return -ENOSPC;
-
- cs = intel_ring_begin(rq, 6 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Blit the batch (which now has all relocs applied) to the
- * stable batch scratch bo area (so that the CS never
- * stumbles over its tlb invalidation bug) ...
- */
- *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
- *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
- *cs++ = cs_offset;
- *cs++ = 4096;
- *cs++ = offset;
-
- *cs++ = MI_FLUSH;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- /* ... and execute it. */
- offset = cs_offset;
- }
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
- MI_BATCH_NON_SECURE);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-i915_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
- MI_BATCH_NON_SECURE);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
i915_vma_put(ce->state);
@@ -1356,8 +645,8 @@ static inline int mi_set_context(struct i915_request *rq,
struct intel_context *ce,
u32 flags)
{
- struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *i915 = engine->i915;
enum intel_engine_id id;
const int num_engines =
IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
@@ -1471,7 +760,7 @@ static inline int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
- u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
+ u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
@@ -1582,7 +871,7 @@ static int switch_context(struct i915_request *rq)
void **residuals = NULL;
int ret;
- GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+ GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
if (engine->wa_ctx.vma->private != ce) {
@@ -1704,99 +993,6 @@ static void gen6_bsd_submit_request(struct i915_request *request)
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
-static int mi_flush_dw(struct i915_request *rq, u32 flags)
-{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cmd = MI_FLUSH_DW;
-
- /*
- * We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- /*
- * Bspec vol 1c.3 - blitter engine command streamer:
- * "If ENABLED, all TLBs will be invalidated once the flush
- * operation is complete. This bit is only valid when the
- * Post-Sync Operation field is a value of 1h or 3h."
- */
- cmd |= flags;
-
- *cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
-{
- return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
-}
-
-static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
-{
- return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
-}
-
-static int
-hsw_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
- /* bit0-7 is the length on GEN6+ */
- *cs++ = offset;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen6_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965);
- /* bit0-7 is the length on GEN6+ */
- *cs++ = offset;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-/* Blitter support (SandyBridge+) */
-
-static int gen6_ring_flush(struct i915_request *rq, u32 mode)
-{
- return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
-}
-
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
@@ -1843,11 +1039,11 @@ static void setup_irq(struct intel_engine_cs *engine)
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
} else if (INTEL_GEN(i915) >= 3) {
- engine->irq_enable = i9xx_irq_enable;
- engine->irq_disable = i9xx_irq_disable;
+ engine->irq_enable = gen3_irq_enable;
+ engine->irq_disable = gen3_irq_disable;
} else {
- engine->irq_enable = i8xx_irq_enable;
- engine->irq_disable = i8xx_irq_disable;
+ engine->irq_enable = gen2_irq_enable;
+ engine->irq_disable = gen2_irq_disable;
}
}
@@ -1874,7 +1070,7 @@ static void setup_common(struct intel_engine_cs *engine)
* equivalent to our next initial breadcrumb so we can elide
* engine->emit_init_breadcrumb().
*/
- engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
if (IS_GEN(i915, 5))
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
@@ -1883,11 +1079,11 @@ static void setup_common(struct intel_engine_cs *engine)
if (INTEL_GEN(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(i915) >= 4)
- engine->emit_bb_start = i965_emit_bb_start;
+ engine->emit_bb_start = gen4_emit_bb_start;
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
- engine->emit_bb_start = i915_emit_bb_start;
+ engine->emit_bb_start = gen3_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
@@ -1900,25 +1096,23 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(i915) >= 7) {
- engine->emit_flush = gen7_render_ring_flush;
- engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
+ engine->emit_flush = gen7_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
} else if (IS_GEN(i915, 6)) {
- engine->emit_flush = gen6_render_ring_flush;
- engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
+ engine->emit_flush = gen6_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
} else if (IS_GEN(i915, 5)) {
- engine->emit_flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_emit_flush_rcs;
} else {
if (INTEL_GEN(i915) < 4)
- engine->emit_flush = gen2_render_ring_flush;
+ engine->emit_flush = gen2_emit_flush;
else
- engine->emit_flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_emit_flush_rcs;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
-
- engine->resume = rcs_resume;
}
static void setup_vcs(struct intel_engine_cs *engine)
@@ -1929,15 +1123,15 @@ static void setup_vcs(struct intel_engine_cs *engine)
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN(i915, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
- engine->emit_flush = gen6_bsd_ring_flush;
+ engine->emit_flush = gen6_emit_flush_vcs;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
if (IS_GEN(i915, 6))
- engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
- engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
} else {
- engine->emit_flush = bsd_ring_flush;
+ engine->emit_flush = gen4_emit_flush_vcs;
if (IS_GEN(i915, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
@@ -1949,13 +1143,13 @@ static void setup_bcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- engine->emit_flush = gen6_ring_flush;
+ engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
if (IS_GEN(i915, 6))
- engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
- engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static void setup_vecs(struct intel_engine_cs *engine)
@@ -1964,12 +1158,12 @@ static void setup_vecs(struct intel_engine_cs *engine)
GEM_BUG_ON(INTEL_GEN(i915) < 7);
- engine->emit_flush = gen6_ring_flush;
+ engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_enable = hsw_vebox_irq_enable;
- engine->irq_disable = hsw_vebox_irq_disable;
+ engine->irq_enable = hsw_irq_enable_vecs;
+ engine->irq_disable = hsw_irq_disable_vecs;
- engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 2f59fc6df3c2..296391deeb94 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -51,15 +51,16 @@ static void rps_timer(struct timer_list *t)
{
struct intel_rps *rps = from_timer(rps, t, timer);
struct intel_engine_cs *engine;
+ ktime_t dt, last, timestamp;
enum intel_engine_id id;
s64 max_busy[3] = {};
- ktime_t dt, last;
+ timestamp = 0;
for_each_engine(engine, rps_to_gt(rps), id) {
s64 busy;
int i;
- dt = intel_engine_get_busy_time(engine);
+ dt = intel_engine_get_busy_time(engine, &timestamp);
last = engine->stats.rps;
engine->stats.rps = dt;
@@ -69,16 +70,14 @@ static void rps_timer(struct timer_list *t)
swap(busy, max_busy[i]);
}
}
-
- dt = ktime_get();
last = rps->pm_timestamp;
- rps->pm_timestamp = dt;
+ rps->pm_timestamp = timestamp;
if (intel_rps_is_active(rps)) {
s64 busy;
int i;
- dt = ktime_sub(dt, last);
+ dt = ktime_sub(timestamp, last);
/*
* Our goal is to evaluate each engine independently, so we run
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 85d2bef51524..2da366821dda 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -205,6 +205,18 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
#define WA_SET_FIELD_MASKED(addr, mask, value) \
wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+}
+
+static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+}
+
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
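
Moving the INSTPM_FORCE_ORDERING write out of rcs_resume() and into per-gen context workaround lists works because INSTPM is a masked register: the top 16 bits of a write select which of the bottom 16 bits take effect, so the workaround framework can replay the write verbatim with no read-modify-write. Roughly how the value is built, simplified from the driver's macro conventions:

/*
 * Masked registers pair each control bit in [15:0] with a write-enable
 * bit in [31:16]:
 *
 *   _MASKED_BIT_ENABLE(bit)  ~= (bit << 16) | bit    set 'bit'
 *   _MASKED_BIT_DISABLE(bit) ~= (bit << 16)          clear 'bit'
 *
 * WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING) therefore records a
 * single write that enables just that bit and leaves the rest of the
 * register untouched.
 */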
@@ -355,7 +367,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -600,11 +615,14 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_1604555607:gen12 and Wa_1608008084:gen12
* FF_MODE2 register will return the wrong value when read. The default
* value for this register is zero for all fields and there are no bit
- * masks. So instead of doing a RMW we should just write the TDS timer
- * value for Wa_1604555607.
+ * masks. So instead of doing a RMW we should just write the GS Timer
+ * and TDS timer values for Wa_1604555607 and Wa_16011163337.
*/
- wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
- FF_MODE2_TDS_TIMER_128, 0);
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
+ 0);
/* WaDisableGPGPUMidThreadPreemption:tgl */
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
@@ -630,7 +648,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_ctx_workarounds_init(engine, wal);
else if (IS_GEMINILAKE(i915))
glk_ctx_workarounds_init(engine, wal);
@@ -644,6 +662,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 7))
+ gen7_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 6))
+ gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
return;
else
@@ -917,7 +939,7 @@ static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(i915))
+ if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
GAM_ECOCHK,
ECOCHK_DIS_TLB);
@@ -1180,7 +1202,7 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_gt_workarounds_init(i915, wal);
else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_init(i915, wal);
@@ -1428,6 +1450,18 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_RANGE_4);
}
+static void cml_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+
+ cfl_whitelist_build(engine);
+}
+
static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -1478,9 +1512,15 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1512,6 +1552,9 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg(w, HIZ_CHICKEN);
break;
default:
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1529,6 +1572,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
+ else if (IS_COMETLAKE(i915))
+ cml_whitelist_build(engine);
else if (IS_COFFEELAKE(i915))
cfl_whitelist_build(engine);
else if (IS_GEMINILAKE(i915))
@@ -1725,6 +1770,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /* Wa_22010271021:ehl */
+ if (IS_ELKHARTLAKE(i915))
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN_RANGE(i915, 9, 12)) {
@@ -1734,7 +1785,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
}
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
wa_write_or(wal,
GEN8_GARBCNTL,
@@ -1818,6 +1872,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
+
+ if (IS_GEN(i915, 4))
+ /*
+ * Disable CONSTANT_BUFFER before it is loaded from the context
+ * image. For as it is loaded, it is executed and the stored
+ * address may no longer be valid, leading to a GPU hang.
+ *
+ * This imposes the requirement that userspace reload their
+ * CONSTANT_BUFFER on every batch, fortunately a requirement
+ * they are already accustomed to from before contexts were
+ * enabled.
+ */
+ wa_add(wal, ECOSKPD,
+ 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0 /* XXX bit doesn't stick on Broadwater */);
}
static void
@@ -1932,7 +2001,7 @@ wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
- struct drm_i915_private *i915 = rq->i915;
+ struct drm_i915_private *i915 = rq->engine->i915;
unsigned int i, count = 0;
const struct i915_wa *wa;
u32 srm, *cs;
@@ -2021,7 +2090,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
err = 0;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
+ if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
continue;
if (!wa_verify(wa, results[i], wal->name, from))
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index f88e445a1cae..729c3c7b11e2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -49,7 +49,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 697114dd1f47..73243ba59c7d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -10,6 +10,7 @@
#include "intel_gt_requests.h"
#include "i915_selftest.h"
+#include "selftest_engine_heartbeat.h"
static int timeline_sync(struct intel_timeline *tl)
{
@@ -142,24 +143,6 @@ out:
return err;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
- unsigned long *saved)
-{
- *saved = engine->props.heartbeat_interval_ms;
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
- unsigned long saved)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms = saved;
-}
-
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -170,11 +153,9 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- unsigned long heartbeat;
-
- engine_heartbeat_disable(engine, &heartbeat);
+ st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- engine_heartbeat_enable(engine, heartbeat);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
@@ -192,11 +173,9 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- unsigned long heartbeat;
-
- engine_heartbeat_disable(engine, &heartbeat);
+ st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_pulse);
- engine_heartbeat_enable(engine, heartbeat);
+ st_engine_heartbeat_enable(engine);
if (err && err != -ENODEV)
break;
@@ -386,11 +365,27 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- saved_hangcheck = i915_modparams.enable_hangcheck;
- i915_modparams.enable_hangcheck = INT_MAX;
+ saved_hangcheck = i915->params.enable_hangcheck;
+ i915->params.enable_hangcheck = INT_MAX;
err = intel_gt_live_subtests(tests, &i915->gt);
- i915_modparams.enable_hangcheck = saved_hangcheck;
+ i915->params.enable_hangcheck = saved_hangcheck;
return err;
}
+
+void st_engine_heartbeat_disable(struct intel_engine_cs *engine)
+{
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
+}
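
The helpers now live with the heartbeat selftests and are shared through the new header below: disable takes an engine-pm reference and parks the heartbeat so a test owns the engine, while enable drops the reference and restores the engine's default interval rather than a caller-saved value. A hypothetical wrapper showing the intended pairing:

static int with_heartbeat_disabled(struct intel_engine_cs *engine,
                                   int (*test)(struct intel_engine_cs *))
{
        int err;

        st_engine_heartbeat_disable(engine); /* hold PM, park heartbeat */
        err = test(engine);
        st_engine_heartbeat_enable(engine);  /* restore default interval */

        return err;
}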
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
new file mode 100644
index 000000000000..cd27113d5400
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_HEARTBEAT_H
+#define SELFTEST_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void st_engine_heartbeat_disable(struct intel_engine_cs *engine);
+void st_engine_heartbeat_enable(struct intel_engine_cs *engine);
+
+#endif /* SELFTEST_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index cbf6b0735272..b08fc5390e8a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -6,7 +6,107 @@
#include "i915_selftest.h"
#include "selftest_engine.h"
+#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+
+static int live_engine_busy_stats(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Check that if an engine supports busy-stats, they tell the truth.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ ktime_t de, dt;
+ ktime_t t[2];
+
+ if (!intel_engine_supports_stats(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ err = -EBUSY;
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ ENGINE_TRACE(engine, "measuring idle time\n");
+ preempt_disable();
+ de = intel_engine_get_busy_time(engine, &t[0]);
+ udelay(100);
+ de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
+ preempt_enable();
+ dt = ktime_sub(t[1], t[0]);
+ if (de < 0 || de > 10) {
+ pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
+ engine->name,
+ de, (int)div64_u64(100 * de, dt), dt);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ goto end;
+ }
+
+ /* 100% busy */
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ err = -ETIME;
+ goto end;
+ }
+
+ ENGINE_TRACE(engine, "measuring busy time\n");
+ preempt_disable();
+ de = intel_engine_get_busy_time(engine, &t[0]);
+ udelay(100);
+ de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
+ preempt_enable();
+ dt = ktime_sub(t[1], t[0]);
+ if (100 * de < 95 * dt || 95 * de > 100 * dt) {
+ pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
+ engine->name,
+ de, (int)div64_u64(100 * de, dt), dt);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ goto end;
+ }
+
+end:
+ st_engine_heartbeat_enable(engine);
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ return err;
+}
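
The spinner check cross-multiplies to keep its roughly 5% tolerance in integer arithmetic: the reported busy time de must track the sampled wall-clock delta dt on both sides. In numbers:

/*
 * 100 * de < 95 * dt   <=>  de/dt < 0.95            under-reported
 *  95 * de > 100 * dt  <=>  de/dt > 100/95 ~ 1.053  over-reported
 *
 * e.g. for dt = 100000ns of spinning, any de outside roughly
 * 95000..105263ns fails the busy-stats check.
 */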
static int live_engine_pm(void *arg)
{
@@ -77,6 +177,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 242181a5214c..6180a47c1b51 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -5,10 +5,141 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/sort.h>
+
+#include "intel_gt_clock_utils.h"
+
#include "selftest_llc.h"
#include "selftest_rc6.h"
#include "selftest_rps.h"
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static void measure_clocks(struct intel_engine_cs *engine,
+ u32 *out_cycles, ktime_t *out_dt)
+{
+ ktime_t dt[5];
+ u32 cycles[5];
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ preempt_disable();
+ cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
+ dt[i] = ktime_get();
+
+ udelay(1000);
+
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
+ cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
+ preempt_enable();
+ }
+
+ /* Use the median of both cycle/dt; close enough */
+ sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);
+ *out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;
+
+ sort(dt, 5, sizeof(*dt), cmp_u64, NULL);
+ *out_dt = div_u64(dt[1] + 2 * dt[2] + dt[3], 4);
+}
+
+static int live_gt_clocks(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+ pr_info("CS_TIMESTAMP frequency unknown\n");
+ return 0;
+ }
+
+ if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
+ return 0;
+
+ if (IS_GEN(gt->i915, 5))
+ /*
+ * XXX CS_TIMESTAMP low dword is dysfunctional?
+ *
+ * Ville's experiments indicate the high dword still works,
+ * but at a correspondingly reduced frequency.
+ */
+ return 0;
+
+ if (IS_GEN(gt->i915, 4))
+ /*
+ * XXX CS_TIMESTAMP appears gibberish
+ *
+ * Ville's experiments indicate that it mostly appears 'stuck'
+ * in that we see the register report the same cycle count
+ * for a couple of reads.
+ */
+ return 0;
+
+ intel_gt_pm_get(gt);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ u32 cycles;
+ u32 expected;
+ u64 time;
+ u64 dt;
+
+ if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
+ continue;
+
+ measure_clocks(engine, &cycles, &dt);
+
+ time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+
+ pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
+ engine->name, cycles, time, dt, expected,
+ RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+
+ if (9 * time < 8 * dt || 8 * time > 9 * dt) {
+ pr_err("%s: CS ticks did not match walltime!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {
+ pr_err("%s: walltime did not match CS ticks!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
static int live_gt_resume(void *arg)
{
struct intel_gt *gt = arg;
@@ -52,6 +183,7 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_gt_clocks),
SUBTEST(live_rc6_manual),
SUBTEST(live_rps_clock_interval),
SUBTEST(live_rps_control),
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 4aa4cc917d8b..fb5ebf930ab2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -29,6 +29,7 @@
#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -203,12 +204,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@@ -217,12 +218,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(gt->i915) >= 4) {
@@ -230,24 +231,24 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
}
@@ -310,22 +311,6 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static int igt_hang_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -482,7 +467,7 @@ static int igt_reset_nop_engine(void *arg)
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -499,6 +484,20 @@ static int igt_reset_nop_engine(void *arg)
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+
err = PTR_ERR(rq);
break;
}
@@ -526,7 +525,7 @@ static int igt_reset_nop_engine(void *arg)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -576,7 +575,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
@@ -628,7 +627,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
@@ -805,10 +804,10 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].resets =
i915_reset_engine_count(global, other);
- if (!(flags & TEST_OTHERS))
+ if (other == engine && !(flags & TEST_SELF))
continue;
- if (other == engine && !(flags & TEST_SELF))
+ if (other != engine && !(flags & TEST_OTHERS))
continue;
threads[tmp].engine = other;
@@ -827,7 +826,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
@@ -866,13 +865,29 @@ static int __igt_reset_engines(struct intel_gt *gt,
count++;
if (rq) {
+ if (rq->fence.error != -EIO) {
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to reset request %llx:%lld\n",
+ engine->name, test_name,
+ rq->fence.context,
+ rq->fence.seqno);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ break;
+ }
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
- " failed to complete request after reset\n",
- engine->name, test_name);
+ " failed to complete request %llx:%lld after reset\n",
+ engine->name, test_name,
+ rq->fence.context,
+ rq->fence.seqno);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
@@ -901,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@@ -983,7 +998,7 @@ static int igt_reset_engines(void *arg)
},
{
"self-priority",
- TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
+ TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
},
{ }
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 924bc01ef526..daa4aabab9a7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -51,22 +52,6 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
return vma;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static bool is_active(struct i915_request *rq)
{
if (i915_request_is_active(rq))
@@ -75,7 +60,7 @@ static bool is_active(struct i915_request *rq)
if (i915_request_on_hold(rq))
return true;
- if (i915_request_started(rq))
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
return true;
return false;
@@ -234,7 +219,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
err = -EIO;
break;
}
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
@@ -332,7 +317,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
i915_request_put(rq[0]);
err_ce:
- tasklet_kill(&engine->execlists.tasklet); /* flush submission */
+ intel_engine_flush_submission(engine);
igt_spinner_end(&spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
@@ -342,7 +327,7 @@ err_ce:
intel_context_put(ce[n]);
}
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
@@ -363,6 +348,156 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
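The fill loop above leans on intel_ring_direction() to decide when enough requests have been queued that the preemption must unwind backwards across the wrap. A generic wrap-aware ordering test can be sketched as the offset distance reduced modulo a power-of-two ring size; this is an illustrative assumption, not necessarily the driver's exact helper:

static int ring_direction(u32 size, u32 next, u32 prev)
{
	u32 d = (next - prev) & (size - 1); /* distance within the ring */

	if (!d)
		return 0;

	return d < size / 2 ? 1 : -1; /* is the nearer way forwards or backwards? */
}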
+
static int live_pin_rewind(void *arg)
{
struct intel_gt *gt = arg;
@@ -471,7 +606,7 @@ static int live_hold_reset(void *arg)
break;
}
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -531,7 +666,7 @@ static int live_hold_reset(void *arg)
i915_request_put(rq);
out:
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_context_put(ce);
if (err)
break;
@@ -578,7 +713,7 @@ static int live_error_interrupt(void *arg)
const struct error_phase *p;
int err = 0;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
for (p = phases; p->error[0] != GOOD; p++) {
struct i915_request *client[ARRAY_SIZE(phases->error)];
@@ -677,7 +812,7 @@ out:
}
}
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err) {
intel_gt_set_wedged(gt);
return err;
@@ -845,10 +980,11 @@ static int live_timeslice_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct i915_vma *vma;
void *vaddr;
int err = 0;
- int count;
/*
* If a request takes too long, we would like to give other users
@@ -885,26 +1021,21 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
- for_each_prime_number_from(count, 1, 16) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, gt, id) {
- if (!intel_engine_has_preemption(engine))
- continue;
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
- memset(vaddr, 0, PAGE_SIZE);
+ memset(vaddr, 0, PAGE_SIZE);
- engine_heartbeat_disable(engine);
- err = slice_semaphore_queue(engine, vma, count);
- engine_heartbeat_enable(engine);
- if (err)
- goto err_pin;
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_pin;
- }
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
}
}
@@ -1020,7 +1151,7 @@ static int live_timeslice_rewind(void *arg)
* Expect execution/evaluation order XZY
*/
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
slot = memset32(engine->status_page.addr + 1000, 0, 4);
@@ -1031,18 +1162,18 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[0] = create_rewinder(ce, NULL, slot, X);
- if (IS_ERR(rq[0])) {
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[A1])) {
intel_context_put(ce);
goto err;
}
- rq[1] = create_rewinder(ce, NULL, slot, Y);
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
intel_context_put(ce);
- if (IS_ERR(rq[1]))
+ if (IS_ERR(rq[A2]))
goto err;
- err = wait_for_submit(engine, rq[1], HZ / 2);
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
if (err) {
pr_err("%s: failed to submit first context\n",
engine->name);
@@ -1055,12 +1186,12 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[2] = create_rewinder(ce, rq[0], slot, Z);
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
intel_context_put(ce);
if (IS_ERR(rq[2]))
goto err;
- err = wait_for_submit(engine, rq[2], HZ / 2);
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
if (err) {
pr_err("%s: failed to submit second context\n",
engine->name);
@@ -1068,6 +1199,7 @@ static int live_timeslice_rewind(void *arg)
}
/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
/* Wait for the timeslice to kick in */
del_timer(&engine->execlists.timer);
@@ -1114,7 +1246,7 @@ err:
wmb();
engine->props.timeslice_duration_ms = timeslice;
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
for (i = 0; i < 3; i++)
i915_request_put(rq[i]);
if (igt_flush_test(gt->i915))
@@ -1140,9 +1272,17 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
return rq;
}
-static long timeslice_threshold(const struct intel_engine_cs *engine)
+static long slice_timeout(struct intel_engine_cs *engine)
{
- return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
}
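The budget is thus two timeslice periods (one for the slice to expire, one for the resulting switch), a fixed HZ / 5 for the nop request itself to retire, and one jiffy of slack. A worked example under assumed config values:

/*
 * Illustrative: with CONFIG_HZ=250 and a 1ms timeslice,
 * msecs_to_jiffies_timeout(1) == 2, so the wait budget is
 * 2 * 2 + 250 / 5 + 1 = 55 jiffies (~220ms), dominated by the
 * HZ / 5 allowance for the nop request to complete.
 */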
static int live_timeslice_queue(void *arg)
@@ -1198,7 +1338,7 @@ static int live_timeslice_queue(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
@@ -1243,24 +1383,8 @@ static int live_timeslice_queue(void *arg)
intel_engine_flush_submission(engine);
} while (READ_ONCE(engine->execlists.pending[0]));
- if (!READ_ONCE(engine->execlists.timer.expires) &&
- execlists_active(&engine->execlists) == rq &&
- !i915_request_completed(rq)) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- GEM_TRACE_DUMP();
-
- memset(vaddr, 0xff, PAGE_SIZE);
- err = -EINVAL;
- }
-
/* Timeslice every jiffy, so within 2 we should signal */
- if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -1275,7 +1399,7 @@ static int live_timeslice_queue(void *arg)
err_rq:
i915_request_put(rq);
err_heartbeat:
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
@@ -1321,7 +1445,7 @@ static int live_timeslice_nopreempt(void *arg)
break;
}
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
/* Create an unpreemptible spinner */
@@ -1349,7 +1473,7 @@ static int live_timeslice_nopreempt(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
- err = PTR_ERR(rq);
+ err = PTR_ERR(ce);
goto out_spin;
}
@@ -1379,7 +1503,7 @@ static int live_timeslice_nopreempt(void *arg)
* allow the maximum priority barrier through. Wait long
* enough to see if it is timesliced in by mistake.
*/
- if (i915_request_wait(rq, 0, timeslice_threshold(engine)) >= 0) {
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
engine->name);
err = -EINVAL;
@@ -1390,7 +1514,7 @@ out_spin:
igt_spinner_end(&spin);
out_heartbeat:
xchg(&engine->props.timeslice_duration_ms, timeslice);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
@@ -2294,7 +2418,7 @@ static int live_suppress_self_preempt(void *arg)
if (igt_flush_test(gt->i915))
goto err_wedged;
- intel_engine_pm_get(engine);
+ st_engine_heartbeat_disable(engine);
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
@@ -2302,14 +2426,14 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_wedged;
}
@@ -2321,7 +2445,7 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_client_b;
}
i915_request_add(rq_b);
@@ -2332,7 +2456,7 @@ static int live_suppress_self_preempt(void *arg)
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_wedged;
}
@@ -2346,12 +2470,12 @@ static int live_suppress_self_preempt(void *arg)
engine->name,
engine->execlists.preempt_hang.count,
depth);
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
err = -EINVAL;
goto err_client_b;
}
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -2371,183 +2495,6 @@ err_wedged:
goto err_client_b;
}
-static int __i915_sw_fence_call
-dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
- return NOTIFY_DONE;
-}
-
-static struct i915_request *dummy_request(struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- rq = kzalloc(sizeof(*rq), GFP_KERNEL);
- if (!rq)
- return NULL;
-
- rq->engine = engine;
-
- spin_lock_init(&rq->lock);
- INIT_LIST_HEAD(&rq->fence.cb_list);
- rq->fence.lock = &rq->lock;
- rq->fence.ops = &i915_fence_ops;
-
- i915_sched_node_init(&rq->sched);
-
- /* mark this request as permanently incomplete */
- rq->fence.seqno = 1;
- BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
- GEM_BUG_ON(i915_request_completed(rq));
-
- i915_sw_fence_init(&rq->submit, dummy_notify);
- set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
-
- spin_lock_init(&rq->lock);
- rq->fence.lock = &rq->lock;
- INIT_LIST_HEAD(&rq->fence.cb_list);
-
- return rq;
-}
-
-static void dummy_request_free(struct i915_request *dummy)
-{
- /* We have to fake the CS interrupt to kick the next request */
- i915_sw_fence_commit(&dummy->submit);
-
- i915_request_mark_complete(dummy);
- dma_fence_signal(&dummy->fence);
-
- i915_sched_node_fini(&dummy->sched);
- i915_sw_fence_fini(&dummy->submit);
-
- dma_fence_free(&dummy->fence);
-}
-
-static int live_suppress_wait_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct preempt_client client[4];
- struct i915_request *rq[ARRAY_SIZE(client)] = {};
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
- int i;
-
- /*
- * Waiters are given a little priority nudge, but not enough
- * to actually cause any preemption. Double check that we do
- * not needlessly generate preempt-to-idle cycles.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
- return -ENOMEM;
- if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
- goto err_client_0;
- if (preempt_client_init(gt, &client[2])) /* head of queue */
- goto err_client_1;
- if (preempt_client_init(gt, &client[3])) /* bystander */
- goto err_client_2;
-
- for_each_engine(engine, gt, id) {
- int depth;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!engine->emit_init_breadcrumb)
- continue;
-
- for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
- struct i915_request *dummy;
-
- engine->execlists.preempt_hang.count = 0;
-
- dummy = dummy_request(engine);
- if (!dummy)
- goto err_client_3;
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_request *this;
-
- this = spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
- if (IS_ERR(this)) {
- err = PTR_ERR(this);
- goto err_wedged;
- }
-
- /* Disable NEWCLIENT promotion */
- __i915_active_fence_set(&i915_request_timeline(this)->last_request,
- &dummy->fence);
-
- rq[i] = i915_request_get(this);
- i915_request_add(this);
- }
-
- dummy_request_free(dummy);
-
- GEM_BUG_ON(i915_request_completed(rq[0]));
- if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
- pr_err("%s: First client failed to start\n",
- engine->name);
- goto err_wedged;
- }
- GEM_BUG_ON(!i915_request_started(rq[0]));
-
- if (i915_request_wait(rq[depth],
- I915_WAIT_PRIORITY,
- 1) != -ETIME) {
- pr_err("%s: Waiter depth:%d completed!\n",
- engine->name, depth);
- goto err_wedged;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- igt_spinner_end(&client[i].spin);
- i915_request_put(rq[i]);
- rq[i] = NULL;
- }
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
- engine->name,
- engine->execlists.preempt_hang.count,
- depth);
- err = -EINVAL;
- goto err_client_3;
- }
- }
- }
-
- err = 0;
-err_client_3:
- preempt_client_fini(&client[3]);
-err_client_2:
- preempt_client_fini(&client[2]);
-err_client_1:
- preempt_client_fini(&client[1]);
-err_client_0:
- preempt_client_fini(&client[0]);
- return err;
-
-err_wedged:
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- igt_spinner_end(&client[i].spin);
- i915_request_put(rq[i]);
- }
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_3;
-}
-
static int live_chain_preempt(void *arg)
{
struct intel_gt *gt = arg;
@@ -2796,6 +2743,168 @@ err_ce:
return err;
}
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring = __intel_context_ring_size(ring_sz);
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we rollback large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
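Stepping n from 0 to 3 with queue_sz = n * SZ_4K / 4 places the preemption point at each quarter of the 4KiB ring, so the rollback is exercised both before and after the wrap:

/*
 * Illustrative queue sizes tried against the SZ_4K ring:
 *   n=0 ->    0 bytes queued (immediate preemption)
 *   n=1 -> 1024 bytes
 *   n=2 -> 2048 bytes
 *   n=3 -> 3072 bytes (preemption lands close to the wrap)
 */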
+
static int live_preempt_gang(void *arg)
{
struct intel_gt *gt = arg;
@@ -2840,16 +2949,8 @@ static int live_preempt_gang(void *arg)
/* Submit each spinner at increasing priority */
engine->schedule(rq, &attr);
-
- if (prio <= I915_PRIORITY_MAX)
- continue;
-
- if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
- break;
-
- if (__igt_timeout(end_time, NULL))
- break;
- } while (1);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
pr_debug("%s: Preempt chain of %d requests\n",
engine->name, prio);
@@ -3417,7 +3518,7 @@ static int smoke_crescendo_thread(void *arg)
return err;
count++;
- } while (!__igt_timeout(end_time, NULL));
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
smoke->count = count;
return 0;
@@ -3493,7 +3594,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count++;
}
- } while (!__igt_timeout(end_time, NULL));
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
@@ -3506,7 +3607,7 @@ static int live_preempt_smoke(void *arg)
struct preempt_smoke smoke = {
.gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
- .ncontext = 1024,
+ .ncontext = 256,
};
const unsigned int phase[] = { 0, BATCH };
struct igt_live_test t;
@@ -3706,13 +3807,43 @@ out:
return err;
}
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
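The optional filter callback lets each caller narrow the sibling set; live_virtual_slice below passes intel_engine_has_timeslices. A hypothetical variant restricted to preemption-capable engines would follow the same pattern:

/* Hypothetical example, not part of the patch: */
static unsigned int
select_preemptible_siblings(struct intel_gt *gt, unsigned int class,
			    struct intel_engine_cs **siblings)
{
	return __select_siblings(gt, class, siblings,
				 intel_engine_has_preemption);
}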
+
static int live_virtual_engine(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int class, inst;
+ unsigned int class;
int err;
if (intel_uc_uses_guc_submission(&gt->uc))
@@ -3730,13 +3861,7 @@ static int live_virtual_engine(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, n;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -3845,7 +3970,7 @@ static int live_virtual_mask(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
int err;
if (intel_uc_uses_guc_submission(&gt->uc))
@@ -3854,17 +3979,178 @@ static int live_virtual_mask(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- break;
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
- siblings[nsibling++] = gt->engine_class[class][inst];
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
}
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
if (nsibling < 2)
continue;
- err = mask_virtual_engine(gt, siblings, nsibling);
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
@@ -3982,7 +4268,7 @@ static int live_virtual_preserved(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
/*
* Check that the context image retains non-privileged (user) registers
@@ -4000,13 +4286,7 @@ static int live_virtual_preserved(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -4217,7 +4497,7 @@ static int live_virtual_bond(void *arg)
};
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
int err;
if (intel_uc_uses_guc_submission(&gt->uc))
@@ -4227,14 +4507,7 @@ static int live_virtual_bond(void *arg)
const struct phase *p;
int nsibling;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- break;
-
- GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -4280,7 +4553,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
}
for (n = 0; n < nsibling; n++)
- engine_heartbeat_disable(siblings[n]);
+ st_engine_heartbeat_disable(siblings[n]);
rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -4351,7 +4624,7 @@ out_rq:
i915_request_put(rq);
out_heartbeat:
for (n = 0; n < nsibling; n++)
- engine_heartbeat_enable(siblings[n]);
+ st_engine_heartbeat_enable(siblings[n]);
intel_context_put(ve);
out_spin:
@@ -4363,7 +4636,7 @@ static int live_virtual_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
/*
* Check that we handle a reset event within a virtual engine.
@@ -4381,13 +4654,7 @@ static int live_virtual_reset(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -4405,6 +4672,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
SUBTEST(live_pin_rewind),
SUBTEST(live_hold_reset),
SUBTEST(live_error_interrupt),
@@ -4418,8 +4686,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_nopreempt),
SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
- SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_user),
@@ -4427,6 +4695,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
SUBTEST(live_virtual_bond),
SUBTEST(live_virtual_reset),
};
@@ -5030,7 +5299,7 @@ static int live_lrc_gpr(void *arg)
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
err = __live_lrc_gpr(engine, scratch, false);
if (err)
@@ -5041,7 +5310,7 @@ static int live_lrc_gpr(void *arg)
goto err;
err:
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -5190,7 +5459,7 @@ static int live_lrc_timestamp(void *arg)
for_each_engine(data.engine, gt, id) {
int i, err = 0;
- engine_heartbeat_disable(data.engine);
+ st_engine_heartbeat_disable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
struct intel_context *tmp;
@@ -5223,7 +5492,7 @@ static int live_lrc_timestamp(void *arg)
}
err:
- engine_heartbeat_enable(data.engine);
+ st_engine_heartbeat_enable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
if (!data.ce[i])
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 63f87d8608c3..b25eba50c88e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -157,7 +157,7 @@ static int read_mocs_table(struct i915_request *rq,
{
u32 addr;
- if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
addr = global_mocs_offset();
else
addr = mocs_offset(rq->engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 2dc460624bbc..3c8434846fa1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -132,7 +132,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
@@ -197,10 +197,10 @@ int live_rc6_ctx_wa(void *arg)
int pass;
for (pass = 0; pass < 2; pass++) {
+ struct i915_gpu_error *error = &gt->i915->gpu_error;
struct intel_context *ce;
unsigned int resets =
- i915_reset_engine_count(&gt->i915->gpu_error,
- engine);
+ i915_reset_engine_count(error, engine);
const u32 *res;
/* Use a sacrificial context */
@@ -230,8 +230,7 @@ int live_rc6_ctx_wa(void *arg)
engine->name, READ_ONCE(*res));
if (resets !=
- i915_reset_engine_count(&gt->i915->gpu_error,
- engine)) {
+ i915_reset_engine_count(error, engine)) {
pr_err("%s: GPU reset required\n",
engine->name);
add_taint_for_CI(TAINT_WARN);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 5049c3dd08a6..bb753f0c12eb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -12,6 +12,7 @@
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
+#include "selftest_engine_heartbeat.h"
#include "selftest_rps.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
@@ -20,22 +21,6 @@
/* Try to isolate the impact of cstates from determining frequency response */
#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static void dummy_rps_work(struct work_struct *wrk)
{
}
@@ -249,13 +234,13 @@ int live_rps_clock_interval(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@@ -266,7 +251,7 @@ int live_rps_clock_interval(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -322,7 +307,7 @@ int live_rps_clock_interval(void *arg)
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err == 0) {
u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
@@ -408,7 +393,7 @@ int live_rps_control(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
@@ -424,7 +409,7 @@ int live_rps_control(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -434,7 +419,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not set minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@@ -451,7 +436,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@@ -466,7 +451,7 @@ int live_rps_control(void *arg)
min_dt = ktime_sub(ktime_get(), min_dt);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
engine->name,
@@ -637,14 +622,14 @@ int live_rps_frequency_cs(void *arg)
int freq;
} min, max;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, false,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
break;
}
@@ -725,7 +710,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -779,14 +764,14 @@ int live_rps_frequency_srm(void *arg)
int freq;
} min, max;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, true,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
break;
}
@@ -866,7 +851,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -1061,11 +1046,11 @@ int live_rps_interrupt(void *arg)
intel_gt_pm_wait_for_idle(engine->gt);
GEM_BUG_ON(intel_rps_is_active(rps));
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
err = __rps_up_interrupt(rps, engine, &spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
goto out;
@@ -1074,13 +1059,13 @@ int live_rps_interrupt(void *arg)
/* Keep the engine awake but idle and check for DOWN */
if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
intel_rc6_disable(&gt->rc6);
err = __rps_down_interrupt(rps, engine);
intel_rc6_enable(&gt->rc6);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
goto out;
}
@@ -1165,13 +1150,13 @@ int live_rps_power(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@@ -1182,7 +1167,7 @@ int live_rps_power(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -1195,7 +1180,7 @@ int live_rps_power(void *arg)
min.power = measure_power_at(rps, &min.freq);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
engine->name,
@@ -1252,6 +1237,11 @@ int live_rps_dynamic(void *arg)
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
+ if (intel_rps_has_interrupts(rps))
+ pr_info("RPS has interrupt support\n");
+ if (intel_rps_uses_timer(rps))
+ pr_info("RPS has timer support\n");
+
for_each_engine(engine, gt, id) {
struct i915_request *rq;
struct {
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index ef1c35073dc0..fcdee951579b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -12,6 +12,7 @@
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
@@ -426,12 +427,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (INTEL_GEN(rq->i915) >= 8) {
+ if (INTEL_GEN(rq->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
- } else if (INTEL_GEN(rq->i915) >= 4) {
+ } else if (INTEL_GEN(rq->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;
@@ -751,22 +752,6 @@ out_free:
return err;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static int live_hwsp_rollover_kernel(void *arg)
{
struct intel_gt *gt = arg;
@@ -785,7 +770,7 @@ static int live_hwsp_rollover_kernel(void *arg)
struct i915_request *rq[3] = {};
int i;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
if (intel_gt_wait_for_idle(gt, HZ / 2)) {
err = -EIO;
goto out;
@@ -836,7 +821,7 @@ static int live_hwsp_rollover_kernel(void *arg)
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 32785463ec9e..febc9e6692ba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -417,6 +417,20 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
return false;
}
+static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
+{
+ reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
+ switch (reg) {
+ case 0x358:
+ case 0x35c:
+ case 0x3a8:
+ return true;
+
+ default:
+ return false;
+ }
+}
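Judging by i915's register naming conventions, the three offsets appear to be RING_TIMESTAMP (0x358), RING_TIMESTAMP_UDW (0x35c) and RING_CTX_TIMESTAMP (0x3a8) relative to the engine's mmio base; a sketch of the assumed definitions:

/* Assumed register layout, for illustration: */
#define RING_TIMESTAMP(base)		_MMIO((base) + 0x358)
#define RING_TIMESTAMP_UDW(base)	_MMIO((base) + 0x35c)
#define RING_CTX_TIMESTAMP(base)	_MMIO((base) + 0x3a8)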
+
static bool ro_register(u32 reg)
{
if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
@@ -497,6 +511,9 @@ static int check_dirty_whitelist(struct intel_context *ce)
if (wo_register(engine, reg))
continue;
+ if (timestamp(engine, reg))
+ continue; /* timestamps are expected to autoincrement */
+
ro_reg = ro_register(reg);
/* Clear non priv flags */
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
new file mode 100644
index 000000000000..e7e96d7073c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/README
@@ -0,0 +1,46 @@
+ASM sources for auto generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files present on
+i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any
+modification must be made to the respective ASM files, and the build
+instructions below must be followed.
+
+Building
+========
+
+Environment
+-----------
+
+The IGT GPU tools scripts and Mesa's i965 instruction assembler are used
+for building.
+
+Please make sure Mesa is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run the script from the IGT source root directory.
+
+The instructions below assume:
+ * IGT GPU tools source code is located in your home directory (~) as ~/igt
+ * Mesa source code is located in your home directory (~) as ~/mesa
+ and built under the ~/mesa/build directory
+ * Linux kernel source code is located in your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+ -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
 -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
new file mode 100644
index 000000000000..5fdf384bb621
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
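+/*
+ * Note: each mov(16) ...UW above stores 16 halfwords (32 bytes, one GRF);
+ * with a0 advancing by 32 bytes per iteration and a loop count of 127,
+ * the loop clears g0..g126, leaving g127 as the header for the EOT send.
+ */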
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
new file mode 100644
index 000000000000..97c7ac9e3854
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block X offset (bytes): ThreadSlotID * 4 */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block Y offset (row): EU row number */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index fb10f3597ea5..9bbe8a795cb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -424,25 +424,28 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
/* A negative value means "use platform/config default" */
- if (i915_modparams.guc_log_level < 0) {
+ if (i915->params.guc_log_level < 0) {
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
}
- if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
+ if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
+ "guc_log_level", i915->params.guc_log_level,
"verbosity too high");
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
}
- GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
- GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
- return i915_modparams.guc_log_level;
+ GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
+ GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
+ return i915->params.guc_log_level;
}
int intel_guc_log_create(struct intel_guc_log *log)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 94eb63f309ce..fdfeb4b9b0f5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -660,10 +660,12 @@ void intel_guc_submission_disable(struct intel_guc *guc)
static bool __guc_submission_selected(struct intel_guc *guc)
{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
if (!intel_guc_submission_is_supported(guc))
return false;
- return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+ return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}
void intel_guc_submission_init_early(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index f518fe05c6f9..1c2d6358826c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -47,15 +47,15 @@ static void __confirm_options(struct intel_uc *uc)
drm_dbg(&i915->drm,
"enable_guc=%d (guc:%s submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
+ i915->params.enable_guc,
yesno(intel_uc_wants_guc(uc)),
yesno(intel_uc_wants_guc_submission(uc)),
yesno(intel_uc_wants_huc(uc)));
- if (i915_modparams.enable_guc == -1)
+ if (i915->params.enable_guc == -1)
return;
- if (i915_modparams.enable_guc == 0) {
+ if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
GEM_BUG_ON(intel_uc_wants_huc(uc));
@@ -65,25 +65,25 @@ static void __confirm_options(struct intel_uc *uc)
if (!intel_uc_supports_guc(uc))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "GuC is not supported!");
+ i915->params.enable_guc, "GuC is not supported!");
- if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
+ if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "HuC is not supported!");
+ i915->params.enable_guc, "HuC is not supported!");
- if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
+ if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "GuC submission is N/A");
+ i915->params.enable_guc, "GuC submission is N/A");
- if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
+ if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
ENABLE_GUC_LOAD_HUC))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "undocumented flag");
+ i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index e1caae93996d..59b27aba15c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -47,12 +47,15 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
* between 33.0 and 35.2 are only related to new additions to support new Gen12
* features.
+ *
+ * Note that RKL uses the same firmware as TGL.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
- fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
@@ -112,11 +115,13 @@ struct __packed uc_fw_platform_requirement {
},
static void
-__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
+__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
static const struct uc_fw_platform_requirement fw_blobs[] = {
INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
};
+ enum intel_platform p = INTEL_INFO(i915)->platform;
+ u8 rev = INTEL_REVID(i915);
int i;
for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
@@ -151,35 +156,35 @@ __uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
}
/* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
+ if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
uc_fw->path = NULL;
}
-static const char *__override_guc_firmware_path(void)
+static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
- if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
- return i915_modparams.guc_firmware_path;
+ if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ return i915->params.guc_firmware_path;
return "";
}
-static const char *__override_huc_firmware_path(void)
+static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
- if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC)
- return i915_modparams.huc_firmware_path;
+ if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
+ return i915->params.huc_firmware_path;
return "";
}
-static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
+static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
const char *path = NULL;
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
- path = __override_guc_firmware_path();
+ path = __override_guc_firmware_path(i915);
break;
case INTEL_UC_FW_TYPE_HUC:
- path = __override_huc_firmware_path();
+ path = __override_huc_firmware_path(i915);
break;
}
@@ -213,10 +218,8 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
if (HAS_GT_UC(i915)) {
- __uc_fw_auto_select(uc_fw,
- INTEL_INFO(i915)->platform,
- INTEL_REVID(i915));
- __uc_fw_user_override(uc_fw);
+ __uc_fw_auto_select(i915, uc_fw);
+ __uc_fw_user_override(i915, uc_fw);
}
intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 8b87f130f7f1..f1940939260a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1904,19 +1904,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
goto err_free_bb;
}
- ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
- if (ret)
- goto err_free_obj;
-
bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
if (IS_ERR(bb->va)) {
ret = PTR_ERR(bb->va);
- goto err_finish_shmem_access;
- }
-
- if (bb->clflush & CLFLUSH_BEFORE) {
- drm_clflush_virt_range(bb->va, bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_BEFORE;
+ goto err_free_obj;
}
ret = copy_gma_to_hva(s->vgpu, mm,
@@ -1935,7 +1926,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
- bb->accessing = true;
bb->bb_start_cmd_va = s->ip_va;
if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
@@ -1956,8 +1946,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
return 0;
err_unmap:
i915_gem_object_unpin_map(bb->obj);
-err_finish_shmem_access:
- i915_gem_object_finish_access(bb->obj);
err_free_obj:
i915_gem_object_put(bb->obj);
err_free_bb:
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a1696e9ce4b6..7ba16ddfe75f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -199,8 +199,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -314,8 +316,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv)) &&
+ if ((IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@@ -498,8 +502,10 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add support for more platforms */
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915)) {
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -527,8 +533,10 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -551,8 +559,10 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 37fc460414a8..c3eb3838fe88 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -198,6 +198,7 @@ static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
}
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+ .name = "i915_gem_object_vgpu",
.flags = I915_GEM_OBJECT_IS_PROXY,
.get_pages = vgpu_gem_get_pages,
.put_pages = vgpu_gem_put_pages,
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 190651df5db1..22247805c345 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -149,7 +149,7 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 3e88e3b5c43a..26cae4846c82 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -59,7 +59,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_KBL;
else if (IS_BROXTON(i915))
return D_BXT;
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
return D_CFL;
return 0;
@@ -1435,7 +1435,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
IS_KABYLAKE(vgpu->gvt->gt->i915) ||
- IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
+ IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1460,7 +1461,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
case SKL_PCODE_CDCLK_CONTROL:
if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
IS_KABYLAKE(vgpu->gvt->gt->i915) ||
- IS_COFFEELAKE(vgpu->gvt->gt->i915))
+ IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1722,7 +1724,8 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
- if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
@@ -1731,7 +1734,8 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
- if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
+ if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
data & _MASKED_BIT_ENABLE(2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -3393,7 +3397,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
} else if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915)) {
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0fb1df71c637..3c3b9842bbbd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -348,7 +348,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
+ if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -509,26 +509,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+ bb->bb_offset;
- if (bb->ppgtt) {
- /* for non-priv bb, scan&shadow is only for
- * debugging purpose, so the content of shadow bb
- * is the same as original bb. Therefore,
- * here, rather than switch to shadow bb's gma
- * address, we directly use original batch buffer's
- * gma address, and send original bb to hardware
- * directly
- */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va,
- bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
- i915_gem_object_finish_access(bb->obj);
- bb->accessing = false;
-
- } else {
+ /*
+ * For a non-priv bb, scan & shadow is only for debugging
+ * purposes, so the content of the shadow bb is identical to
+ * the original bb. Therefore, rather than switching to the
+ * shadow bb's gma address, we use the original batch buffer's
+ * gma address and send the original bb to the hardware
+ * directly.
+ */
+ if (!bb->ppgtt) {
bb->vma = i915_gem_object_ggtt_pin(bb->obj,
- NULL, 0, 0, 0);
+ NULL, 0, 0, 0);
if (IS_ERR(bb->vma)) {
ret = PTR_ERR(bb->vma);
goto err;
@@ -539,27 +531,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (gmadr_bytes == 8)
bb->bb_start_cmd_va[2] = 0;
- /* No one is going to touch shadow bb from now on. */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va,
- bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(bb->obj,
- false);
- if (ret)
- goto err;
-
ret = i915_vma_move_to_active(bb->vma,
workload->req,
0);
if (ret)
goto err;
-
- i915_gem_object_finish_access(bb->obj);
- bb->accessing = false;
}
+
+ /* No one is going to touch shadow bb from now on. */
+ i915_gem_object_flush_map(bb->obj);
}
return 0;
err:
@@ -630,9 +610,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
- if (bb->accessing)
- i915_gem_object_finish_access(bb->obj);
-
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
@@ -939,7 +916,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
context_page_num = 19;
context_base = (void *) ctx->lrc_reg_state -
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 15d317f2a4a4..64e7a0b791c3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -124,8 +124,6 @@ struct intel_vgpu_shadow_bb {
struct i915_vma *vma;
void *va;
u32 *bb_start_cmd_va;
- unsigned int clflush;
- bool accessing;
unsigned long bb_offset;
bool ppgtt;
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bca036ac6621..8594a8ef08ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -64,7 +64,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
- i915_params_dump(&i915_modparams, &p);
+ i915_params_dump(&i915->params, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 62b2c5f0495d..4e2b077692cb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -138,9 +138,6 @@ static ssize_t i915_param_charp_write(struct file *file,
char **s = m->private;
char *new, *old;
- /* FIXME: remove locking after params aren't the module params */
- kernel_param_lock(THIS_MODULE);
-
old = *s;
new = strndup_user(ubuf, PAGE_SIZE);
if (IS_ERR(new)) {
@@ -152,8 +149,6 @@ static ssize_t i915_param_charp_write(struct file *file,
kfree(old);
out:
- kernel_param_unlock(THIS_MODULE);
-
return len;
}
@@ -229,7 +224,7 @@ _i915_param_create_file(struct dentry *parent, const char *name,
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- struct i915_params *params = &i915_modparams;
+ struct i915_params *params = &i915->params;
struct dentry *dir;
dir = debugfs_create_dir("i915_params", minor->debugfs_root);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 34ee12f3f02d..67102dc26fce 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -501,6 +501,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
+
+ i915_params_free(&dev_priv->params);
}
/**
@@ -915,6 +917,9 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
+ /* Device parameters start as a copy of module parameters. */
+ i915_params_copy(&i915->params, &i915_modparams);
+
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
@@ -948,7 +953,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
+ if (!i915->params.nuclear_pageflip && match_info->gen < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
@@ -958,7 +963,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
- i915_modparams.fake_lmem_start) {
+ i915->params.fake_lmem_start) {
mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN;
mkwrite_device_info(i915)->is_dgfx = true;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index adb9bf34cf97..9aad3ec979bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -273,6 +273,7 @@ struct drm_i915_display_funcs {
void (*set_cdclk)(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
+ int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
@@ -829,6 +830,9 @@ struct drm_i915_private {
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
+ /* i915 device parameters */
+ struct i915_params params;
+
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -950,6 +954,13 @@ struct drm_i915_private {
struct intel_global_obj obj;
} cdclk;
+ struct {
+ /* The current hardware dbuf configuration */
+ u8 enabled_slices;
+
+ struct intel_global_obj obj;
+ } dbuf;
+
/**
* wq - Driver workqueue for GEM.
*
@@ -1126,12 +1137,12 @@ struct drm_i915_private {
* Set during HW readout of watermarks/DDB. Some platforms
* need to know when we're still using BIOS-provided values
* (which we don't fully trust).
+ *
+ * FIXME get rid of this.
*/
bool distrust_bios_wm;
} wm;
- u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
-
struct dram_info {
bool valid;
bool is_16gb_dimm;
@@ -1256,6 +1267,11 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
(engine__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
+#define for_each_uabi_class_engine(engine__, class__, i915__) \
+ for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
+ (engine__) && (engine__)->uabi_class == (class__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
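+
+/*
+ * Example usage (sketch): walk every user-visible video engine.
+ *
+ *	for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_VIDEO, i915)
+ *		drm_info(&i915->drm, "%s\n", engine->name);
+ */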
+
#define I915_GTT_OFFSET_NONE ((u32)-1)
/*
@@ -1406,10 +1422,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
+#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
+#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1453,6 +1471,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
+
+#define IS_CML_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CML_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
+ INTEL_INFO(dev_priv)->gt == 2)
+
#define IS_CNL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
@@ -1523,6 +1549,13 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TGL_REVID(p, since, until) \
(IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+#define RKL_REVID_A0 0x0
+#define RKL_REVID_B0 0x1
+#define RKL_REVID_C0 0x4
+
+#define IS_RKL_REVID(p, since, until) \
+ (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
@@ -1616,6 +1649,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
+#define HAS_PSR_HW_TRACKING(dev_priv) \
+ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
@@ -1658,7 +1693,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
/* Only valid when HAS_DISPLAY() is true */
-#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
+#define INTEL_DISPLAY_ENABLED(dev_priv) \
+ (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
static inline bool intel_vtd_active(void)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0cbcb9f54e7d..9aa3066cb75d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -933,6 +933,18 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
+static void discard_ggtt_vma(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ spin_lock(&obj->vma.lock);
+ if (!RB_EMPTY_NODE(&vma->obj_node)) {
+ rb_erase(&vma->obj_node, &obj->vma.tree);
+ RB_CLEAR_NODE(&vma->obj_node);
+ }
+ spin_unlock(&obj->vma.lock);
+}
+
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -979,6 +991,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
+new_vma:
vma = i915_vma_instance(obj, &ggtt->vm, view);
if (IS_ERR(vma))
return vma;
@@ -993,6 +1006,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
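+ /*
+ * A pinned or still-active vma cannot be unbound below; drop it
+ * from the object's vma tree (discard_ggtt_vma()) and retry with
+ * a fresh vma for this view.
+ */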
+ if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
+ discard_ggtt_vma(vma);
+ goto new_vma;
+ }
+
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f6226df9f972..c9b0ee5e1d23 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_UPDATE BIT_ULL(9)
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index d042644b9cd2..40390b2352b1 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -80,7 +80,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
- value = i915_modparams.enable_hangcheck &&
+ value = i915->params.enable_hangcheck &&
intel_has_gpu_reset(&i915->gt);
if (value && intel_has_reset_engine(&i915->gt))
value = 2;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index eec292d06f11..866166ada10e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1698,7 +1698,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
- i915_params_copy(&error->params, &i915_modparams);
+ i915_params_copy(&error->params, &i915->params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -1713,7 +1713,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
struct i915_gpu_coredump *error;
- if (!i915_modparams.error_capture)
+ if (!i915->params.error_capture)
return NULL;
error = kzalloc(sizeof(*error), gfp);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 284cf078135a..710224d930c5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -777,7 +777,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
mode = &vblank->hwmode;
- if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
return __intel_get_crtc_scanline_from_timestamp(crtc);
vtotal = mode->crtc_vtotal;
@@ -836,7 +836,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
unsigned long irqflags;
bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
- mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+ crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
drm_dbg(&dev_priv->drm,
@@ -2097,67 +2097,68 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
*/
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
- struct drm_i915_private *dev_priv = arg;
+ struct drm_i915_private *i915 = arg;
+ void __iomem * const regs = i915->uncore.regs;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+ if (unlikely(!intel_irqs_enabled(i915)))
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
/* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ de_ier = raw_reg_read(regs, DEIER);
+ raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(dev_priv)) {
- sde_ier = I915_READ(SDEIER);
- I915_WRITE(SDEIER, 0);
+ if (!HAS_PCH_NOP(i915)) {
+ sde_ier = raw_reg_read(regs, SDEIER);
+ raw_reg_write(regs, SDEIER, 0);
}
/* Find, clear, then process each source of interrupt */
- gt_iir = I915_READ(GTIIR);
+ gt_iir = raw_reg_read(regs, GTIIR);
if (gt_iir) {
- I915_WRITE(GTIIR, gt_iir);
- ret = IRQ_HANDLED;
- if (INTEL_GEN(dev_priv) >= 6)
- gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
+ raw_reg_write(regs, GTIIR, gt_iir);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_gt_irq_handler(&i915->gt, gt_iir);
else
- gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
+ gen5_gt_irq_handler(&i915->gt, gt_iir);
+ ret = IRQ_HANDLED;
}
- de_iir = I915_READ(DEIIR);
+ de_iir = raw_reg_read(regs, DEIIR);
if (de_iir) {
- I915_WRITE(DEIIR, de_iir);
- ret = IRQ_HANDLED;
- if (INTEL_GEN(dev_priv) >= 7)
- ivb_display_irq_handler(dev_priv, de_iir);
+ raw_reg_write(regs, DEIIR, de_iir);
+ if (INTEL_GEN(i915) >= 7)
+ ivb_display_irq_handler(i915, de_iir);
else
- ilk_display_irq_handler(dev_priv, de_iir);
+ ilk_display_irq_handler(i915, de_iir);
+ ret = IRQ_HANDLED;
}
- if (INTEL_GEN(dev_priv) >= 6) {
- u32 pm_iir = I915_READ(GEN6_PMIIR);
+ if (INTEL_GEN(i915) >= 6) {
+ u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
- I915_WRITE(GEN6_PMIIR, pm_iir);
+ raw_reg_write(regs, GEN6_PMIIR, pm_iir);
+ gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
}
}
- I915_WRITE(DEIER, de_ier);
- if (!HAS_PCH_NOP(dev_priv))
- I915_WRITE(SDEIER, sde_ier);
+ raw_reg_write(regs, DEIER, de_ier);
+ if (sde_ier)
+ raw_reg_write(regs, SDEIER, sde_ier);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
return ret;
}
@@ -2254,7 +2255,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
+ if (IS_ROCKETLAKE(dev_priv))
+ return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
+ else if (INTEL_GEN(dev_priv) >= 11)
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
else if (INTEL_GEN(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2869,13 +2872,15 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
+ u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
if (INTEL_GEN(dev_priv) >= 12) {
enum transcoder trans;
- for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2902,8 +2907,10 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
- /* Wa_14010685332:icl */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) {
+ /* Wa_14010685332:icl,jsl,ehl */
+ if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP ||
+ INTEL_PCH_TYPE(dev_priv) == PCH_JSP ||
+ INTEL_PCH_TYPE(dev_priv) == PCH_MCC) {
intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
@@ -3396,6 +3403,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
+ u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
if (INTEL_GEN(dev_priv) <= 10)
@@ -3416,7 +3425,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12) {
enum transcoder trans;
- for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 02559da61e6e..a7b61e6ec508 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -40,6 +40,15 @@ struct i915_params i915_modparams __read_mostly = {
#undef MEMBER
};
+/*
+ * Note: As a rule, keep module parameter sysfs permissions read-only
+ * 0400. Runtime changes are only supported through i915 debugfs.
+ *
+ * For any exceptions requiring write access and runtime changes through module
+ * parameter sysfs, prevent debugfs file creation by setting the parameter's
+ * debugfs mode to 0.
+ */
+
i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
@@ -49,7 +58,7 @@ i915_param_named_unsafe(enable_dc, int, 0400,
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
"3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
-i915_param_named_unsafe(enable_fbc, int, 0600,
+i915_param_named_unsafe(enable_fbc, int, 0400,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
@@ -57,7 +66,7 @@ i915_param_named_unsafe(lvds_channel_mode, int, 0400,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-i915_param_named_unsafe(panel_use_ssc, int, 0600,
+i915_param_named_unsafe(panel_use_ssc, int, 0400,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
@@ -65,29 +74,34 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-i915_param_named_unsafe(reset, uint, 0600,
+i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
i915_param_named_unsafe(vbt_firmware, charp, 0400,
"Load VBT from specified file under /lib/firmware");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-i915_param_named(error_capture, bool, 0600,
+i915_param_named(error_capture, bool, 0400,
"Record the GPU state following a hang. "
"This information in /sys/class/drm/card<N>/error is vital for "
"triaging and debugging hangs.");
#endif
-i915_param_named_unsafe(enable_hangcheck, bool, 0600,
+i915_param_named_unsafe(enable_hangcheck, bool, 0400,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_psr, int, 0600,
+i915_param_named_unsafe(enable_psr, int, 0400,
"Enable PSR "
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
+i915_param_named(psr_safest_params, bool, 0400,
+ "Replace PSR VBT parameters by the safest and not optimal ones. This "
+ "is helpfull to detect if PSR issues are related to bad values set in "
+ " VBT. (0=use VBT paramters, 1=use safest parameters)");
+
i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe the driver for specified devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
@@ -96,22 +110,22 @@ i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
+i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)");
-i915_param_named(fastboot, int, 0600,
+i915_param_named(fastboot, int, 0400,
"Try to skip unnecessary mode sets at boot time "
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
-i915_param_named_unsafe(load_detect_test, bool, 0600,
+i915_param_named_unsafe(load_detect_test, bool, 0400,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
-i915_param_named_unsafe(force_reset_modeset_test, bool, 0600,
+i915_param_named_unsafe(force_reset_modeset_test, bool, 0400,
"Force a modeset during gpu reset for testing (default:false). "
"For developers only.");
-i915_param_named_unsafe(invert_brightness, int, 0600,
+i915_param_named_unsafe(invert_brightness, int, 0400,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
@@ -121,10 +135,11 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named(mmio_debug, int, 0600,
+i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
+/* Special case writable file */
i915_param_named(verbose_state_checks, bool, 0600,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
@@ -155,7 +170,7 @@ i915_param_named_unsafe(huc_firmware_path, charp, 0400,
i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
"DMC firmware path to use instead of the default one");
-i915_param_named_unsafe(enable_dp_mst, bool, 0600,
+i915_param_named_unsafe(enable_dp_mst, bool, 0400,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
@@ -163,7 +178,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, int, 0600,
+i915_param_named(enable_dpcd_backlight, int, 0400,
"Enable support for DPCD backlight control"
"(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 4f21bfffbf0e..53fb5ba8fbed 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -53,6 +53,7 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
+ param(bool, psr_safest_params, false, 0600) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index eb0b5be7c35d..e5fdf17cd9cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -536,6 +536,7 @@ static const struct intel_device_info vlv_info = {
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
+ .display.has_psr_hw_tracking = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
HSW_PIPE_OFFSETS, \
@@ -690,6 +691,7 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_fbc = 1, \
.display.has_hdcp = 1, \
.display.has_psr = 1, \
+ .display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
.display.has_csr = 1, \
.has_rc6 = 1, \
@@ -766,6 +768,20 @@ static const struct intel_device_info cfl_gt3_info = {
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
+#define CML_PLATFORM \
+ GEN9_FEATURES, \
+ PLATFORM(INTEL_COMETLAKE)
+
+static const struct intel_device_info cml_gt1_info = {
+ CML_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info cml_gt2_info = {
+ CML_PLATFORM,
+ .gt = 2,
+};
+
#define GEN10_FEATURES \
GEN9_FEATURES, \
GEN(10), \
@@ -788,6 +804,7 @@ static const struct intel_device_info cnl_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
+ .abox_mask = BIT(0), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
@@ -831,6 +848,7 @@ static const struct intel_device_info ehl_info = {
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
+ .abox_mask = GENMASK(2, 1), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
@@ -863,6 +881,19 @@ static const struct intel_device_info tgl_info = {
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
+static const struct intel_device_info rkl_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_ROCKETLAKE),
+ .abox_mask = BIT(0),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C),
+ .require_force_probe = 1,
+ .display.has_psr_hw_tracking = 0,
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
+};
+
#define GEN12_DGFX_FEATURES \
GEN12_FEATURES, \
.is_dgfx = 1
@@ -933,14 +964,15 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
- INTEL_CML_GT1_IDS(&cfl_gt1_info),
- INTEL_CML_GT2_IDS(&cfl_gt2_info),
- INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
- INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CML_GT1_IDS(&cml_gt1_info),
+ INTEL_CML_GT2_IDS(&cml_gt2_info),
+ INTEL_CML_U_GT1_IDS(&cml_gt1_info),
+ INTEL_CML_U_GT2_IDS(&cml_gt2_info),
INTEL_CNL_IDS(&cnl_info),
INTEL_ICL_11_IDS(&icl_info),
INTEL_EHL_IDS(&ehl_info),
INTEL_TGL_12_IDS(&tgl_info),
+ INTEL_RKL_IDS(&rkl_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 962ded9ce73f..28bc5f13ae52 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -441,7 +441,11 @@ static u64 count_interrupts(struct drm_i915_private *i915)
static void i915_pmu_event_destroy(struct perf_event *event)
{
- WARN_ON(event->parent);
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+
+ drm_WARN_ON(&i915->drm, event->parent);
+
module_put(THIS_MODULE);
}
@@ -561,7 +565,10 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
intel_engine_supports_stats(engine)) {
- val = ktime_to_ns(intel_engine_get_busy_time(engine));
+ ktime_t unused;
+
+ val = ktime_to_ns(intel_engine_get_busy_time(engine,
+ &unused));
} else {
val = engine->pmu.sample[sample].cur;
}
@@ -1058,8 +1065,10 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+
+ drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
+ drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
cpuhp_remove_multi_state(pmu->cpuhp.slot);
pmu->cpuhp.slot = CPUHP_INVALID;
}
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index e75c528ebbe0..c1ebda9b5627 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -109,8 +109,7 @@ query_engine_info(struct drm_i915_private *i915,
for_each_uabi_engine(engine, i915)
num_uabi_engines++;
- len = sizeof(struct drm_i915_query_engine_info) +
- num_uabi_engines * sizeof(struct drm_i915_engine_info);
+ len = struct_size(query_ptr, engines, num_uabi_engines);
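+ /*
+ * struct_size() above expands to sizeof(*query_ptr) plus
+ * num_uabi_engines trailing engines[] entries, with overflow checking.
+ */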
ret = copy_query_item(&query, sizeof(query), len, query_item);
if (ret != 0)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 06cd1d28a176..f09120cac89a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1869,9 +1869,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
#define _EHL_COMBOPHY_C 0x160000
+#define _RKL_COMBOPHY_D 0x161000
#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
_ICL_COMBOPHY_B, \
- _EHL_COMBOPHY_C)
+ _EHL_COMBOPHY_C, \
+ _RKL_COMBOPHY_D)
/* CNL/ICL Port CL_DW registers */
#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
@@ -2877,9 +2879,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
-#define MBUS_ABOX_CTL _MMIO(0x45038)
-#define MBUS_ABOX1_CTL _MMIO(0x45048)
-#define MBUS_ABOX2_CTL _MMIO(0x4504C)
+#define _MBUS_ABOX0_CTL 0x45038
+#define _MBUS_ABOX1_CTL 0x45048
+#define _MBUS_ABOX2_CTL 0x4504C
+#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \
+ _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL))
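+/* e.g. MBUS_ABOX_CTL(2) picks _MBUS_ABOX2_CTL, i.e. _MMIO(0x4504C) */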
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -3768,19 +3773,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Clocking configuration register */
#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
-#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */
#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
-/*
- * Note that on at least on ELK the below value is reported for both
- * 333 and 400 MHz BIOS FSB setting, but given that the gmch datasheet
- * lists only 200/266/333 MHz FSB as supported let's decode it as 333 MHz.
- */
#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
+#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */
#define CLKCFG_FSB_MASK (7 << 0)
#define CLKCFG_MEM_533 (1 << 4)
#define CLKCFG_MEM_667 (2 << 4)
@@ -4512,25 +4514,39 @@ enum {
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
-#define _PSR2_CTL_A 0x60900
-#define _PSR2_CTL_EDP 0x6f900
-#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
-#define EDP_PSR2_ENABLE (1 << 31)
-#define EDP_SU_TRACK_ENABLE (1 << 30)
-#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
-#define EDP_PSR2_TP2_TIME_500us (0 << 8)
-#define EDP_PSR2_TP2_TIME_100us (1 << 8)
-#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
-#define EDP_PSR2_TP2_TIME_50us (3 << 8)
-#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
-#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
-#define EDP_PSR2_IDLE_FRAME_MASK 0xf
-#define EDP_PSR2_IDLE_FRAME_SHIFT 0
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
+#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
+#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
+#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13)
+#define EDP_PSR2_FAST_WAKE_MAX_LINES 8
+#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
+#define EDP_PSR2_FAST_WAKE_MASK (3 << 11)
+#define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5
+#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10)
+#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
+#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
+#define EDP_PSR2_IDLE_FRAME_MASK 0xf
+#define EDP_PSR2_IDLE_FRAME_SHIFT 0
#define _PSR_EVENT_TRANS_A 0x60848
#define _PSR_EVENT_TRANS_B 0x61848
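Note that the two generations encode the wake-line counts in opposite directions: the pre-TGL fields store a down-count from the 8-line maximum in a 2-bit field, while the TGL fields store an up-count from the 5-line minimum in a 3-bit field (and FAST_WAKE additionally moves from bit 11 to bit 10). A small sketch of the IO_BUFFER_WAKE arithmetic, using the definitions above:

	#include <stdio.h>

	#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES	8
	#define EDP_PSR2_IO_BUFFER_WAKE(lines) \
		((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)

	#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES	5
	#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) \
		(((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)

	int main(void)
	{
		int lines = 7;

		/* pre-TGL: (8 - 7) << 13 = 0x2000; TGL: (7 - 5) << 13 = 0x4000 */
		printf("pre-TGL 0x%x, TGL 0x%x\n",
		       EDP_PSR2_IO_BUFFER_WAKE(lines),
		       TGL_EDP_PSR2_IO_BUFFER_WAKE(lines));
		return 0;
	}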
@@ -6915,6 +6931,8 @@ enum {
#define _PLANE_CUS_CTL_1_A 0x701c8
#define _PLANE_CUS_CTL_2_A 0x702c8
#define PLANE_CUS_ENABLE (1 << 31)
+#define PLANE_CUS_PLANE_4_RKL (0 << 30)
+#define PLANE_CUS_PLANE_5_RKL (1 << 30)
#define PLANE_CUS_PLANE_6 (0 << 30)
#define PLANE_CUS_PLANE_7 (1 << 30)
#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
@@ -6933,7 +6951,7 @@ enum {
#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
-#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
+#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 (1 << 17)
#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 (2 << 17)
#define PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 (3 << 17)
#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 (4 << 17)
@@ -7581,6 +7599,9 @@ enum {
GEN11_PIPE_PLANE7_FAULT | \
GEN11_PIPE_PLANE6_FAULT | \
GEN11_PIPE_PLANE5_FAULT)
+#define RKL_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
+ GEN11_PIPE_PLANE5_FAULT)
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
@@ -7835,13 +7856,20 @@ enum {
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
-#define BW_BUDDY1_CTL _MMIO(0x45140)
-#define BW_BUDDY2_CTL _MMIO(0x45150)
+#define _BW_BUDDY0_CTL 0x45130
+#define _BW_BUDDY1_CTL 0x45140
+#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_CTL, \
+ _BW_BUDDY1_CTL))
#define BW_BUDDY_DISABLE REG_BIT(31)
#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
+#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
-#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
-#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
+#define _BW_BUDDY0_PAGE_MASK 0x45134
+#define _BW_BUDDY1_PAGE_MASK 0x45144
+#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_PAGE_MASK, \
+ _BW_BUDDY1_PAGE_MASK))
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
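In contrast to _PICK(), the BW_BUDDY registers are evenly spaced (0x45130, 0x45140, a 0x10 stride), so the macro can use _PICK_EVEN(), which is plain linear arithmetic: base + index * stride. A compilable sketch, again without the _MMIO() wrapper:

	#include <stdio.h>

	/* Evenly spaced registers: derive the stride from the first two. */
	#define _PICK_EVEN(__index, __a, __b) \
		((__a) + (__index) * ((__b) - (__a)))

	#define _BW_BUDDY0_CTL 0x45130
	#define _BW_BUDDY1_CTL 0x45140

	int main(void)
	{
		int i;

		for (i = 0; i < 2; i++)
			printf("BW_BUDDY%d_CTL = 0x%05x\n", i,
			       _PICK_EVEN(i, _BW_BUDDY0_CTL, _BW_BUDDY1_CTL));
		return 0;
	}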
@@ -8002,6 +8030,8 @@ enum {
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
+#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
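REG_GENMASK() and REG_FIELD_PREP() are i915's compile-time-checked wrappers around the generic GENMASK()/FIELD_PREP() helpers: the first builds a contiguous bitmask, the second shifts a value into position under that mask. A rough userspace approximation of the GS timer field above (the real helpers add type and constant-expression checking not reproduced here):

	#include <stdio.h>

	/* Stand-ins for GENMASK()/FIELD_PREP() on 32-bit fields;
	 * __builtin_ctz is a gcc/clang builtin. */
	#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

	#define FF_MODE2_GS_TIMER_MASK	GENMASK(31, 24)
	#define FF_MODE2_GS_TIMER_224	FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)

	int main(void)
	{
		/* 224 << 24 = 0xe0000000 */
		printf("FF_MODE2_GS_TIMER_224 = 0x%08x\n", FF_MODE2_GS_TIMER_224);
		return 0;
	}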
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index def62100e666..3bb7320249ae 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -42,7 +42,6 @@
#include "intel_pm.h"
struct execute_cb {
- struct list_head link;
struct irq_work work;
struct i915_sw_fence *fence;
void (*hook)(struct i915_request *rq, struct dma_fence *signal);
@@ -57,7 +56,7 @@ static struct i915_global_request {
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return dev_name(to_request(fence)->i915->drm.dev);
+ return dev_name(to_request(fence)->engine->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -189,14 +188,15 @@ static void irq_execute_cb_hook(struct irq_work *wrk)
static void __notify_execute_cb(struct i915_request *rq)
{
- struct execute_cb *cb;
+ struct execute_cb *cb, *cn;
lockdep_assert_held(&rq->lock);
- if (list_empty(&rq->execute_cb))
+ GEM_BUG_ON(!i915_request_is_active(rq));
+ if (llist_empty(&rq->execute_cb))
return;
- list_for_each_entry(cb, &rq->execute_cb, link)
+ llist_for_each_entry_safe(cb, cn, rq->execute_cb.first, work.llnode)
irq_work_queue(&cb->work);
/*
@@ -209,7 +209,7 @@ static void __notify_execute_cb(struct i915_request *rq)
* preempt-to-idle cycle on the target engine, all the while the
* master execute_cb may refire.
*/
- INIT_LIST_HEAD(&rq->execute_cb);
+ init_llist_head(&rq->execute_cb);
}
static inline void
@@ -327,7 +327,7 @@ bool i915_request_retire(struct i915_request *rq)
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
__notify_execute_cb(rq);
}
- GEM_BUG_ON(!list_empty(&rq->execute_cb));
+ GEM_BUG_ON(!llist_empty(&rq->execute_cb));
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
@@ -357,6 +357,12 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (i915_request_retire(tmp) && tmp != rq);
}
+static void __llist_add(struct llist_node *node, struct llist_head *head)
+{
+ node->next = head->first;
+ head->first = node;
+}
+
static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
@@ -442,7 +448,7 @@ __await_execution(struct i915_request *rq,
i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb);
} else {
- list_add_tail(&cb->link, &signal->execute_cb);
+ __llist_add(&cb->work.llnode, &signal->execute_cb);
}
spin_unlock_irq(&signal->lock);
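A note on the open-coded __llist_add() above: llist_add() proper uses cmpxchg so that producers can run locklessly, but here every producer already holds signal->lock, so a plain non-atomic push is sufficient and cheaper. A userspace sketch of the helper's LIFO behaviour, with the locking assumed rather than shown:

	#include <stdio.h>

	struct llist_node { struct llist_node *next; };
	struct llist_head { struct llist_node *first; };

	/* Non-atomic push; callers must serialize externally (rq->lock here). */
	static void __llist_add(struct llist_node *node, struct llist_head *head)
	{
		node->next = head->first;
		head->first = node;
	}

	int main(void)
	{
		struct llist_head head = { NULL };
		struct llist_node a = { NULL }, b = { NULL };

		__llist_add(&a, &head);
		__llist_add(&b, &head);

		/* LIFO: the list now reads b -> a */
		printf("%s\n", (head.first == &b && b.next == &a) ? "b -> a" : "?");
		return 0;
	}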
@@ -560,15 +566,15 @@ xfer: /* We may be recursing from the signal callback of another i915 fence */
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ __notify_execute_cb(request);
}
+ GEM_BUG_ON(!llist_empty(&request->execute_cb));
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_signal_breadcrumbs(engine);
- __notify_execute_cb(request);
-
spin_unlock(&request->lock);
return result;
@@ -751,7 +757,7 @@ static void __i915_request_ctor(void *arg)
rq->file_priv = NULL;
rq->capture_list = NULL;
- INIT_LIST_HEAD(&rq->execute_cb);
+ init_llist_head(&rq->execute_cb);
}
struct i915_request *
@@ -806,7 +812,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- rq->i915 = ce->engine->i915;
rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
@@ -841,7 +846,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->batch = NULL;
GEM_BUG_ON(rq->file_priv);
GEM_BUG_ON(rq->capture_list);
- GEM_BUG_ON(!list_empty(&rq->execute_cb));
+ GEM_BUG_ON(!llist_empty(&rq->execute_cb));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -1005,12 +1010,12 @@ __emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
u32 seqno)
{
- const int has_token = INTEL_GEN(to->i915) >= 12;
+ const int has_token = INTEL_GEN(to->engine->i915) >= 12;
u32 hwsp_offset;
int len, err;
u32 *cs;
- GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
@@ -1205,7 +1210,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
mark_external(rq);
return i915_sw_fence_await_dma_fence(&rq->submit, fence,
- i915_fence_context_timeout(rq->i915,
+ i915_fence_context_timeout(rq->engine->i915,
fence->context),
I915_FENCE_GFP);
}
@@ -1776,7 +1781,8 @@ long i915_request_wait(struct i915_request *rq,
* (bad for battery).
*/
if (flags & I915_WAIT_PRIORITY) {
- if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+ if (!i915_request_started(rq) &&
+ INTEL_GEN(rq->engine->i915) >= 6)
intel_rps_boost(rq);
}
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8ec7ee4dbadc..590762820761 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -162,9 +162,6 @@ struct i915_request {
struct dma_fence fence;
spinlock_t lock;
- /** On Which ring this request was generated */
- struct drm_i915_private *i915;
-
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
@@ -214,7 +211,7 @@ struct i915_request {
ktime_t emitted;
} duration;
};
- struct list_head execute_cb;
+ struct llist_head execute_cb;
struct i915_sw_fence semaphore;
/*
@@ -564,7 +561,7 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
}
static inline struct intel_timeline *
-i915_request_timeline(struct i915_request *rq)
+i915_request_timeline(const struct i915_request *rq)
{
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->timeline,
@@ -572,14 +569,14 @@ i915_request_timeline(struct i915_request *rq)
}
static inline struct i915_gem_context *
-i915_request_gem_context(struct i915_request *rq)
+i915_request_gem_context(const struct i915_request *rq)
{
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->context->gem_context, true);
}
static inline struct intel_timeline *
-i915_request_active_timeline(struct i915_request *rq)
+i915_request_active_timeline(const struct i915_request *rq)
{
/*
* When in use during submission, we are protected by a guarantee that
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index bc854ad60954..a4addcc64978 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -735,7 +735,7 @@ TRACE_EVENT(i915_request_queue,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -761,7 +761,7 @@ DECLARE_EVENT_CLASS(i915_request,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -804,7 +804,7 @@ TRACE_EVENT(i915_request_in,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -833,7 +833,7 @@ TRACE_EVENT(i915_request_out,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -895,7 +895,7 @@ TRACE_EVENT(i915_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index e28eae4a8f70..f42a9e9a0b4f 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -91,7 +91,7 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
return;
}
- timeout = msecs_to_jiffies_timeout(timeout);
+ timeout = msecs_to_jiffies(timeout);
/*
* Paranoia to make sure the compiler computes the timeout before
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index fc14ebf9a0b7..1f63c4a1f055 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -397,17 +397,15 @@ int i915_vma_bind(struct i915_vma *vma,
vma_flags = atomic_read(&vma->flags);
vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
+
+ bind_flags &= ~vma_flags;
if (bind_flags == 0)
return 0;
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ if (work && bind_flags & vma->vm->bind_async_flags) {
struct dma_fence *prev;
work->vma = vma;
@@ -868,7 +866,6 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
- GEM_BUG_ON(flags & PIN_UPDATE);
GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
/* First try and grab the pin without rebinding the vma */
@@ -1090,7 +1087,8 @@ void i915_vma_release(struct kref *ref)
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &obj->vma.tree);
+ if (!RB_EMPTY_NODE(&vma->obj_node))
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
}
@@ -1232,31 +1230,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-int __i915_vma_unbind(struct i915_vma *vma)
+void __i915_vma_evict(struct i915_vma *vma)
{
- int ret;
-
- lockdep_assert_held(&vma->vm->mutex);
-
- if (i915_vma_is_pinned(vma)) {
- vma_print_allocator(vma, "is pinned");
- return -EAGAIN;
- }
-
- /*
- * After confirming that no one else is pinning this vma, wait for
- * any laggards who may have crept in during the wait (through
- * a residual pin skipping the vm->mutex) to complete.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
GEM_BUG_ON(i915_vma_is_pinned(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
/* Force a pagefault for domain tracking on next user access */
@@ -1295,6 +1271,33 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+ int ret;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ __i915_vma_evict(vma);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
@@ -1306,13 +1309,13 @@ int i915_vma_unbind(struct i915_vma *vma)
intel_wakeref_t wakeref = 0;
int err;
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
- goto out_rpm;
+ return err;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8ad1daabcd58..d0d01f909548 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -203,6 +203,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 8a635bd4d5d8..544ac61fbc36 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -57,10 +57,12 @@ static const char * const platform_names[] = {
PLATFORM_NAME(KABYLAKE),
PLATFORM_NAME(GEMINILAKE),
PLATFORM_NAME(COFFEELAKE),
+ PLATFORM_NAME(COMETLAKE),
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
PLATFORM_NAME(ELKHARTLAKE),
PLATFORM_NAME(TIGERLAKE),
+ PLATFORM_NAME(ROCKETLAKE),
};
#undef PLATFORM_NAME
@@ -933,7 +935,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (IS_ROCKETLAKE(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ runtime->num_sprites[pipe] = 4;
+ else if (INTEL_GEN(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 62e03ffa377e..8d62b8538585 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -73,6 +73,7 @@ enum intel_platform {
INTEL_KABYLAKE,
INTEL_GEMINILAKE,
INTEL_COFFEELAKE,
+ INTEL_COMETLAKE,
/* gen10 */
INTEL_CANNONLAKE,
/* gen11 */
@@ -80,6 +81,7 @@ enum intel_platform {
INTEL_ELKHARTLAKE,
/* gen12 */
INTEL_TIGERLAKE,
+ INTEL_ROCKETLAKE,
INTEL_MAX_PLATFORMS
};
@@ -146,6 +148,7 @@ enum intel_ppgtt_type {
func(has_modular_fia); \
func(has_overlay); \
func(has_psr); \
+ func(has_psr_hw_tracking); \
func(overlay_needs_physical); \
func(supports_tv);
@@ -172,6 +175,8 @@ struct intel_device_info {
u8 pipe_mask;
u8 cpu_transcoder_mask;
+ u8 abox_mask;
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 21b91313cc5d..99fe8aef1c67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -52,6 +52,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_COFFEELAKE(dev_priv))
return true;
+ if (IS_COMETLAKE(dev_priv))
+ return true;
return false;
}
@@ -64,7 +66,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
*/
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_gvt)
+ if (!dev_priv->params.enable_gvt)
return;
if (intel_vgpu_active(dev_priv)) {
@@ -80,7 +82,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
bail:
- i915_modparams.enable_gvt = 0;
+ dev_priv->params.enable_gvt = 0;
}
/**
@@ -100,7 +102,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- if (!i915_modparams.enable_gvt) {
+ if (!dev_priv->params.enable_gvt) {
drm_dbg(&dev_priv->drm,
"GVT-g is disabled by kernel params\n");
return 0;
@@ -121,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
bail:
- i915_modparams.enable_gvt = 0;
+ dev_priv->params.enable_gvt = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 20ab9a5023b5..c668e99eb2e4 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -64,36 +64,49 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm,
"Found Cannon Lake LP PCH (CNP-LP)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv) &&
+ !IS_ROCKETLAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
@@ -107,7 +120,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
+ !IS_ROCKETLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
@@ -141,13 +155,15 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_TIGERLAKE(dev_priv))
+ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
else if (IS_ELKHARTLAKE(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ else if (IS_CANNONLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 07f663cd2d1c..2a32d6230795 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -33,6 +33,7 @@
#include <drm/drm_plane_helper.h>
#include "display/intel_atomic.h"
+#include "display/intel_bw.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
@@ -43,7 +44,6 @@
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "display/intel_bw.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -1437,6 +1437,7 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
@@ -1465,8 +1466,8 @@ static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- WARN_ON(intermediate->wm.plane[plane_id] >
- g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
+ drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
intermediate->sr.plane = max(optimal->sr.plane,
@@ -1483,21 +1484,25 @@ static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- WARN_ON((intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
- intermediate->cxsr);
- WARN_ON((intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
- intermediate->hpll_en);
-
- WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
- intermediate->fbc_en && intermediate->cxsr);
- WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
- intermediate->fbc_en && intermediate->hpll_en);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
+ intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
+ intermediate->hpll_en);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
+ intermediate->fbc_en && intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
+ intermediate->fbc_en && intermediate->hpll_en);
out:
/*
@@ -1681,6 +1686,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1749,11 +1755,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- WARN_ON(active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- WARN_ON(fifo_left != fifo_size);
+ drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -4025,10 +4031,9 @@ icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
return offset;
}
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
{
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-
drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
if (INTEL_GEN(dev_priv) < 11)
@@ -4037,10 +4042,38 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
return ddb_size;
}
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry *entry)
+{
+ u32 slice_mask = 0;
+ u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ u16 slice_size = ddb_size / num_supported_slices;
+ u16 start_slice;
+ u16 end_slice;
+
+ if (!skl_ddb_entry_size(entry))
+ return 0;
+
+ start_slice = entry->start / slice_size;
+ end_slice = (entry->end - 1) / slice_size;
+
+ /*
+	 * A per-plane DDB entry can, in the worst case, span multiple slices,
+	 * but a single entry is always contiguous.
+ */
+ while (start_slice <= end_slice) {
+ slice_mask |= BIT(start_slice);
+ start_slice++;
+ }
+
+ return slice_mask;
+}
+
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
u8 active_pipes);
-static void
+static int
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
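To make the new skl_ddb_dbuf_slice_mask() above concrete, here is the same computation run by hand in userspace with assumed hardware parameters (2048 DDB blocks split over 2 slices, roughly an ICL/TGL-class layout):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ddb_size = 2048, num_slices = 2;	/* assumed */
		unsigned int slice_size = ddb_size / num_slices;	/* 1024 */
		unsigned int start = 512, end = 1536;	/* entry is [start, end) */
		unsigned int start_slice = start / slice_size;		/* 0 */
		unsigned int end_slice = (end - 1) / slice_size;	/* 1 */
		unsigned int mask = 0;

		while (start_slice <= end_slice)
			mask |= 1u << start_slice++;

		printf("slice mask = 0x%x\n", mask);	/* 0x3: spans both */
		return 0;
	}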
@@ -4053,30 +4086,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(intel_state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(intel_state);
+ u8 active_pipes = new_dbuf_state->active_pipes;
u16 ddb_size;
u32 ddb_range_size;
u32 i;
u32 dbuf_slice_mask;
- u32 active_pipes;
u32 offset;
u32 slice_size;
u32 total_slice_mask;
u32 start, end;
+ int ret;
+
+ *num_active = hweight8(active_pipes);
- if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
+ if (!crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
- *num_active = hweight8(dev_priv->active_pipes);
- return;
+ return 0;
}
- if (intel_state->active_pipe_changes)
- active_pipes = intel_state->active_pipes;
- else
- active_pipes = dev_priv->active_pipes;
-
- *num_active = hweight8(active_pipes);
-
ddb_size = intel_get_ddb_size(dev_priv);
slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
@@ -4089,13 +4121,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* that changes the active CRTC list or do modeset would need to
* grab _all_ crtc locks, including the one we currently hold.
*/
- if (!intel_state->active_pipe_changes && !intel_state->modeset) {
+ if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
+ !dev_priv->wm.distrust_bios_wm) {
/*
* alloc may be cleared by clear_intel_crtc_state,
* copy from old state to be sure
+ *
+ * FIXME get rid of this mess
*/
*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
- return;
+ return 0;
}
/*
@@ -4103,10 +4138,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
*/
dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
- DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
- dbuf_slice_mask,
- pipe_name(for_pipe), active_pipes);
-
/*
* Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
* and slice size is 1024, the offset would be 1024
@@ -4174,7 +4205,13 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* FIXME: For now we always enable slice S1 as per
* the Bspec display initialization sequence.
*/
- intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+ new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
end = ddb_range_size *
@@ -4183,11 +4220,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
alloc->start = offset + start;
alloc->end = offset + end;
- DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
- alloc->start, alloc->end);
- DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
- intel_state->enabled_dbuf_slices_mask,
- INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ for_crtc->base.id, for_crtc->name,
+ dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+
+ return 0;
}
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4308,12 +4346,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
-{
- dev_priv->enabled_dbuf_slices_mask =
- intel_enabled_dbuf_slices_mask(dev_priv);
-}
-
/*
* Determines the downscale amount of a plane for the purposes of watermark calculations.
* The bspec defines downscale amount as:
@@ -4334,11 +4366,13 @@ static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
- if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
/*
@@ -4606,7 +4640,7 @@ static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
* For anything else just return one slice yet.
* Should be extended for other platforms.
*/
- return BIT(DBUF_S1);
+ return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
}
static u64
@@ -4758,12 +4792,37 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
u32 blocks;
int level;
+ int ret;
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (!crtc_state->hw.active) {
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ /*
+ * FIXME hack to make sure we compute this sensibly when
+ * turning off all the pipes. Otherwise we leave it at
+ * whatever we had previously, and then runtime PM will
+ * mess it up by turning off all but S1. Remove this
+ * once the dbuf state computation flow becomes sane.
+ */
+ if (new_dbuf_state->active_pipes == 0) {
+ new_dbuf_state->enabled_slices = BIT(DBUF_S1);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+ }
+
alloc->start = alloc->end = 0;
return 0;
}
@@ -4778,8 +4837,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
plane_data_rate,
uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
- alloc, &num_active);
+ ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
+ total_data_rate,
+ alloc, &num_active);
+ if (ret)
+ return ret;
+
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -5003,6 +5066,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -5012,7 +5076,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (WARN_ON(pixel_rate == 0))
+ if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
@@ -5025,11 +5089,13 @@ static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state)))
return 0;
/*
@@ -5190,7 +5256,9 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
+ if ((IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
@@ -5465,6 +5533,7 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
int ret;
@@ -5476,9 +5545,10 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
- WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
- WARN_ON(!fb->format->is_yuv ||
- fb->format->num_planes == 1);
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state));
+ drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
+ fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
y_plane_id, 0);
@@ -5701,13 +5771,13 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state;
+ const struct intel_dbuf_state *old_dbuf_state;
+ const struct intel_dbuf_state *new_dbuf_state;
+ const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
-
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_allocate_pipe_ddb(new_crtc_state);
@@ -5720,6 +5790,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
+ old_dbuf_state->enabled_slices,
+ new_dbuf_state->enabled_slices,
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+
return 0;
}
@@ -5855,7 +5936,8 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int intel_add_all_pipes(struct intel_atomic_state *state)
+static int intel_add_affected_pipes(struct intel_atomic_state *state,
+ u8 pipe_mask)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -5863,6 +5945,9 @@ static int intel_add_all_pipes(struct intel_atomic_state *state)
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state;
+ if ((pipe_mask & BIT(crtc->pipe)) == 0)
+ continue;
+
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -5875,49 +5960,54 @@ static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int ret;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
- /*
- * If this is our first atomic update following hardware readout,
- * we can't trust the DDB that the BIOS programmed for us. Let's
- * pretend that all pipes switched active status so that we'll
- * ensure a full DDB recompute.
- */
if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- state->base.acquire_ctx);
+ /*
+ * skl_ddb_get_pipe_allocation_limits() currently requires
+ * all active pipes to be included in the state so that
+ * it can redistribute the dbuf among them, and it really
+ * wants to recompute things when distrust_bios_wm is set
+ * so we add all the pipes to the state.
+ */
+ ret = intel_add_affected_pipes(state, ~0);
if (ret)
return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_dbuf_state *new_dbuf_state;
+ const struct intel_dbuf_state *old_dbuf_state;
+
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
+ break;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
/*
- * We usually only initialize state->active_pipes if we
- * we're doing a modeset; make sure this field is always
- * initialized during the sanitization process that happens
- * on the first commit too.
+ * skl_ddb_get_pipe_allocation_limits() currently requires
+ * all active pipes to be included in the state so that
+ * it can redistribute the dbuf among them.
*/
- if (!state->modeset)
- state->active_pipes = dev_priv->active_pipes;
- }
-
- /*
- * If the modeset changes which CRTC's are active, we need to
- * recompute the DDB allocation for *all* active pipes, even
- * those that weren't otherwise being modified in any way by this
- * atomic commit. Due to the shrinking of the per-pipe allocations
- * when new active CRTC's are added, it's possible for a pipe that
- * we were already using and aren't changing at all here to suddenly
- * become invalid if its DDB needs exceeds its new allocation.
- *
- * Note that if we wind up doing a full DDB recompute, we can't let
- * any other display updates race with this transaction, so we need
- * to grab the lock on *all* CRTC's.
- */
- if (state->active_pipe_changes || state->modeset) {
- ret = intel_add_all_pipes(state);
+ ret = intel_add_affected_pipes(state,
+ new_dbuf_state->active_pipes);
if (ret)
return ret;
+
+ break;
}
return 0;
@@ -6163,7 +6253,6 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- skl_ddb_get_hw_state(dev_priv);
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
@@ -6735,7 +6824,9 @@ static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
return dev_priv->dram_info.symmetric_memory;
return true;
@@ -7414,7 +7505,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
dev_priv->display.init_clock_gating = cfl_init_clock_gating;
else if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = skl_init_clock_gating;
@@ -7544,3 +7635,89 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
+
+static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return NULL;
+
+ return &dbuf_state->base;
+}
+
+static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_funcs = {
+ .atomic_duplicate_state = intel_dbuf_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_destroy_state,
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
+ if (IS_ERR(dbuf_state))
+ return ERR_CAST(dbuf_state);
+
+ return to_intel_dbuf_state(dbuf_state);
+}
+
+int intel_dbuf_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
+ &dbuf_state->base, &intel_dbuf_funcs);
+
+ return 0;
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(dev_priv,
+ old_dbuf_state->enabled_slices |
+ new_dbuf_state->enabled_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(dev_priv,
+ new_dbuf_state->enabled_slices);
+}
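The pre/post split above implements a make-before-break sequence for the DBUF slices: before the plane update, the union of the old and new slice sets is kept enabled so nothing a plane is still scanning out from gets switched off; after the update, the set is trimmed to just the new slices. The mask arithmetic, reduced to a toy example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int old_slices = 0x3;	/* e.g. S1 + S2 */
		unsigned int new_slices = 0x1;	/* e.g. S1 only */

		unsigned int pre = old_slices | new_slices;	/* 0x3: S2 stays on */
		unsigned int post = new_slices;			/* 0x1: S2 off late */

		printf("pre = 0x%x, post = 0x%x\n", pre, post);
		return 0;
	}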
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 614ac7f8d4cc..a2473594c2db 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,8 +8,10 @@
#include <linux/types.h>
-#include "i915_reg.h"
#include "display/intel_bw.h"
+#include "display/intel_global_state.h"
+
+#include "i915_reg.h"
struct drm_device;
struct drm_i915_private;
@@ -38,6 +40,9 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
+u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -63,4 +68,26 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ u8 enabled_slices;
+ u8 active_pipes;
+};
+
+int intel_dbuf_init(struct drm_i915_private *dev_priv);
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
+
+#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index 14b59b899c9b..40d8f1a95df6 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -76,7 +76,7 @@ region_lmem_init(struct intel_memory_region *mem)
{
int ret;
- if (i915_modparams.fake_lmem_start) {
+ if (mem->i915->params.fake_lmem_start) {
ret = init_fake_lmem_bar(mem);
GEM_BUG_ON(ret);
}
@@ -111,12 +111,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
resource_size_t start;
GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
- GEM_BUG_ON(!i915_modparams.fake_lmem_start);
+ GEM_BUG_ON(!i915->params.fake_lmem_start);
/* Your mappable aperture belongs to me now! */
mappable_end = pci_resource_len(pdev, 2);
io_start = pci_resource_start(pdev, 2),
- start = i915_modparams.fake_lmem_start;
+ start = i915->params.fake_lmem_start;
mem = intel_memory_region_create(i915,
start,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 9cb2d7548daa..153ca9e65382 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -116,6 +116,9 @@ track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
depot_stack_handle_t stack)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
unsigned long flags, n;
bool found = false;
@@ -134,9 +137,9 @@ static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
- if (WARN(!found,
- "Unmatched wakeref (tracking %lu), count %u\n",
- rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+ if (drm_WARN(&i915->drm, !found,
+ "Unmatched wakeref (tracking %lu), count %u\n",
+ rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
@@ -355,10 +358,14 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
bool wakelock)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
int ret;
ret = pm_runtime_get_sync(rpm->kdev);
- WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+ drm_WARN_ONCE(&i915->drm, ret < 0,
+ "pm_runtime_get_sync() failed: %d\n", ret);
intel_runtime_pm_acquire(rpm, wakelock);
@@ -539,6 +546,9 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
*/
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
struct device *kdev = rpm->kdev;
/*
@@ -565,7 +575,8 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
pm_runtime_dont_use_autosuspend(kdev);
ret = pm_runtime_get_sync(kdev);
- WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+ drm_WARN(&i915->drm, ret < 0,
+ "pm_runtime_get_sync() failed: %d\n", ret);
} else {
pm_runtime_use_autosuspend(kdev);
}
@@ -580,11 +591,14 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
- WARN(pm_runtime_get_sync(kdev) < 0,
- "Failed to pass rpm ownership back to core\n");
+ drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
+ "Failed to pass rpm ownership back to core\n");
pm_runtime_dont_use_autosuspend(kdev);
@@ -594,12 +608,15 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
int count = atomic_read(&rpm->wakeref_count);
- WARN(count,
- "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
- intel_rpm_raw_wakeref_count(count),
- intel_rpm_wakelock_count(count));
+ drm_WARN(&i915->drm, count,
+ "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
+ intel_rpm_raw_wakeref_count(count),
+ intel_rpm_wakelock_count(count));
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a61cb8ca4d50..592364aed2da 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -709,7 +709,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- fw_domain_arm_timer(domain);
+ uncore->funcs.force_wake_put(uncore, domain->mask);
}
}
@@ -1185,7 +1185,7 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
/* Only report the first N failures */
- i915_modparams.mmio_debug--;
+ uncore->i915->params.mmio_debug--;
}
static inline void
@@ -1194,7 +1194,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (likely(!i915_modparams.mmio_debug))
+ if (likely(!uncore->i915->params.mmio_debug))
return;
/* interrupts are disabled and re-enabled around uncore->lock usage */
@@ -2093,12 +2093,12 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
goto out;
if (unlikely(check_for_unclaimed_mmio(uncore))) {
- if (!i915_modparams.mmio_debug) {
+ if (!uncore->i915->params.mmio_debug) {
drm_dbg(&uncore->i915->drm,
"Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
- i915_modparams.mmio_debug++;
+ uncore->i915->params.mmio_debug++;
}
uncore->debug->unclaimed_mmio_check--;
ret = true;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 2e471500a646..0016ffc7d914 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -97,6 +97,7 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops fake_ops = {
+ .name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_pages,
.put_pages = fake_put_pages,
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 5dd5d81646c4..a92c0e9b7e6b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,9 +11,9 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
- * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
- * Tests are executed in order by igt/drv_selftest
+ * Tests are executed in order by igt/i915_selftest
*/
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 6090ce35226b..3db34d3eea58 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -11,9 +11,9 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
- * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
- * Tests are executed in order by igt/drv_selftest
+ * Tests are executed in order by igt/i915_selftest
*/
selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
selftest(shmem, shmem_utils_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 8eb3108f1767..be54570c407c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -162,7 +162,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
len = 5;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
len++;
*cs++ = GFX_OP_PIPE_CONTROL(len);
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index d8da142985eb..c2389f8a257d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -11,7 +11,7 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
- * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
* Tests are executed in order by igt/i915_selftest
*/
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6014e8dfcbb1..9271aad7f779 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -24,16 +24,21 @@
#include <linux/prime_numbers.h>
#include <linux/pm_qos.h>
+#include <linux/sort.h>
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/selftest_engine_heartbeat.h"
#include "i915_random.h"
#include "i915_selftest.h"
+#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "lib_sw_fence.h"
@@ -1524,6 +1529,808 @@ struct perf_series {
struct intel_context *ce[];
};
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+
+ return 0;
+}
+
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+#define TF_COUNT 5
+ sort(a, TF_COUNT, sizeof(*a), cmp_u32, NULL);
+
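+ /*
+ * Keep the middle three of the five sorted samples, weighting the
+ * median x2: the sum is 4x the filtered value, which is shifted
+ * back out via TF_BIAS when reporting.
+ */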
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ GEM_BUG_ON(sum > U32_MAX);
+ return sum;
+#define TF_BIAS 2
+}
+
+static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
+{
+ u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+
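+ /* Divide out the x4 weighting left in place by trifilter() */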
+ return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
+}
+
+static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base)));
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store_dw(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static u32 *emit_semaphore_poll(u32 *cs, u32 mode, u32 value, u32 offset)
+{
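+ /* The CS repeatedly evaluates *offset against value under the given SAD comparison mode until it passes */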
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ mode;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_semaphore_poll_until(u32 *cs, u32 offset, u32 value)
+{
+ return emit_semaphore_poll(cs, MI_SEMAPHORE_SAD_EQ_SDD, value, offset);
+}
+
+static void semaphore_set(u32 *sema, u32 value)
+{
+ WRITE_ONCE(*sema, value);
+ wmb(); /* flush the update to the cache, and beyond */
+}
+
+static u32 *hwsp_scratch(const struct intel_context *ce)
+{
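+ /* A zeroed 21-dword scratch area inside the engine's status page (HWSP) */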
+ return memset32(ce->engine->status_page.addr + 1000, 0, 21);
+}
+
+static u32 hwsp_offset(const struct intel_context *ce, u32 *dw)
+{
+ return (i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(dw));
+}
+
+static int measure_semaphore_response(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how many cycles it takes for the HW to detect the change
+ * in a semaphore value.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * poke semaphore
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Semaphore latency: B - A
+ */
+
+ semaphore_set(sema, -1);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
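+ /* 4 dwords to reset the semaphore, plus 12 per sample: poll (4), timestamp store (4), reset (4) */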
+ cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset, 0);
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+ cs = emit_store_dw(cs, offset, 0);
+ }
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ preempt_disable();
+ cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ semaphore_set(sema, i);
+ preempt_enable();
+
+ if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ elapsed[i - 1] = sema[i] - cycles;
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: semaphore response %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_idle_dispatch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for us to submit a request while the
+ * engine is idle, but is resting in our context.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Submission latency: B - A
+ */
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ return err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
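+ /* Sample CS_TIMESTAMP on the CPU immediately before submitting; the request stamps sema[i] when it finally executes */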
+ preempt_disable();
+ local_bh_disable();
+ elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ local_bh_enable();
+ preempt_enable();
+ }
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++)
+ elapsed[i] = sema[i] - elapsed[i];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: idle dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_busy_dispatch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT + 1], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for us to submit a request while the
+ * engine is busy, polling on a semaphore in our context. With
+ * direct submission, this will include the cost of a lite restore.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Submission latency: B - A
+ */
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
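+ /* Mark slot i as in-flight (-1); the timestamp overwrites the marker once the poll is released */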
+ cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ if (i > 1 && wait_for(READ_ONCE(sema[i - 1]), 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ preempt_disable();
+ local_bh_disable();
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ local_bh_enable();
+ semaphore_set(sema, i - 1);
+ preempt_enable();
+ }
+
+ wait_for(READ_ONCE(sema[i - 1]), 500);
+ semaphore_set(sema, i - 1);
+
+ for (i = 1; i <= TF_COUNT; i++) {
+ GEM_BUG_ON(sema[i] == -1);
+ elapsed[i - 1] = sema[i] - elapsed[i];
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: busy dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
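+/*
+ * Plug the engine: queue a semaphore wait on the kernel context so that
+ * anything submitted afterwards backs up behind it until *sema is poked.
+ */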
+static int plug(struct intel_engine_cs *engine, u32 *sema, u32 mode, int value)
+{
+ const u32 offset =
+ i915_ggtt_offset(engine->status_page.vma) +
+ offset_in_page(sema);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ cs = emit_semaphore_poll(cs, mode, value, offset);
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int measure_inter_request(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT + 1], cycles;
+ struct i915_sw_fence *submit;
+ int i, err;
+
+ /*
+ * Measure how long it takes to advance from one request into the
+ * next. Between each request we flush the GPU caches to memory,
+ * update the breadcrumbs, and then invalidate those caches.
+ * We queue up all the requests to be submitted in one batch so
+ * it should be one set of contiguous measurements.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * advance request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Request latency: B - A
+ */
+
+ err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
+ if (err)
+ return err;
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ semaphore_set(sema, 1);
+ return -ENOMEM;
+ }
+
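+ /* Hold every request below on @submit so the whole batch is released to the engine at once */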
+ intel_engine_flush_submission(ce->engine);
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_submit;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+ if (err < 0) {
+ i915_request_add(rq);
+ goto err_submit;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err_submit;
+ }
+
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+ }
+ local_bh_disable();
+ i915_sw_fence_commit(submit);
+ local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
+ heap_fence_put(submit);
+
+ semaphore_set(sema, 1);
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
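+ /* Request i stamps sema[i]; the delta between neighbouring stamps is one request boundary */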
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[i + 1] - sema[i];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: inter-request latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err_submit:
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ semaphore_set(sema, 1);
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_context_switch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ struct i915_request *fence = NULL;
+ u32 elapsed[TF_COUNT + 1], cycles;
+ int i, j, err;
+ u32 *cs;
+
+ /*
+ * Measure how long it takes to advance from one request in one
+ * context to a request in another context. This allows us to
+ * measure how long the context save/restore take, along with all
+ * the inter-context setup we require.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * switch context
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Context switch latency: B - A
+ */
+
+ err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
+ if (err)
+ return err;
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct intel_context *arr[] = {
+ ce, ce->engine->kernel_context
+ };
+ u32 addr = offset + ARRAY_SIZE(arr) * i * sizeof(u32);
+
+ for (j = 0; j < ARRAY_SIZE(arr); j++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(arr[j]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_fence;
+ }
+
+ if (fence) {
+ err = i915_request_await_dma_fence(rq,
+ &fence->fence);
+ if (err) {
+ i915_request_add(rq);
+ goto err_fence;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err_fence;
+ }
+
+ cs = emit_timestamp_store(cs, ce, addr);
+ addr += sizeof(u32);
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_put(fence);
+ fence = i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+ }
+ i915_request_put(fence);
+ intel_engine_flush_submission(ce->engine);
+
+ semaphore_set(sema, 1);
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
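+ /* Each pass stamps ce then the kernel context; sema[2*i + 2] - sema[2*i + 1] spans the switch back into ce */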
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 2] - sema[2 * i + 1];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: context switch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err_fence:
+ i915_request_put(fence);
+ semaphore_set(sema, 1);
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_preemption(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * We measure two latencies while triggering preemption. The first
+ * latency is how long it takes for us to submit a preempting request.
+ * The second latency is how long it takes for us to return from the
+ * preemption back to the original context.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit preemption
+ * B: read CS_TIMESTAMP on GPU (in preempting context)
+ * context switch
+ * C: read CS_TIMESTAMP on GPU (in original context)
+ *
+ * Preemption dispatch latency: B - A
+ * Preemption switch latency: C - B
+ */
+
+ if (!intel_engine_has_preemption(ce->engine))
+ return 0;
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ u32 addr = offset + 2 * i * sizeof(u32);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, addr, -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, addr + sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ if (wait_for(READ_ONCE(sema[2 * i]) == -1, 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_timestamp_store(cs, ce, addr);
+ cs = emit_store_dw(cs, offset, i);
+
+ intel_ring_advance(rq, cs);
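+ /* Highest priority, so submission preempts the semaphore spinner at once */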
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ }
+
+ if (wait_for(READ_ONCE(sema[2 * i - 2]) != -1, 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 0] - elapsed[i - 1];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: preemption dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 1] - sema[2 * i + 0];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: preemption switch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+struct signal_cb {
+ struct dma_fence_cb base;
+ bool seen;
+};
+
+static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct signal_cb *s = container_of(cb, typeof(*s), base);
+
+ smp_store_mb(s->seen, true); /* be safe, be strong */
+}
+
+static int measure_completion(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for the signal (interrupt) sent from
+ * the GPU to be processed by the CPU.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * signal
+ * B: read CS_TIMESTAMP from CPU
+ *
+ * Completion latency: B - A
+ */
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct signal_cb cb = { .seen = false };
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
+
+ local_bh_disable();
+ i915_request_add(rq);
+ local_bh_enable();
+
+ if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
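+ /* Release the GPU poll, busy-wait for the fence-signal callback, then sample CS_TIMESTAMP from the CPU */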
+ preempt_disable();
+ semaphore_set(sema, i);
+ while (!READ_ONCE(cb.seen))
+ cpu_relax();
+
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ preempt_enable();
+ }
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+ GEM_BUG_ON(sema[i + 1] == -1);
+ elapsed[i] = elapsed[i] - sema[i + 1];
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: completion latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static void rps_pin(struct intel_gt *gt)
+{
+ /* Pin the frequency to max so latencies are not skewed by rps ramp-up */
+ atomic_inc(&gt->rps.num_waiters);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ mutex_lock(&gt->rps.lock);
+ intel_rps_set(&gt->rps, gt->rps.max_freq);
+ mutex_unlock(&gt->rps.lock);
+}
+
+static void rps_unpin(struct intel_gt *gt)
+{
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ atomic_dec(&gt->rps.num_waiters);
+}
+
+static int perf_request_latency(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ int err = 0;
+
+ if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */
+ return 0;
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ rps_pin(engine->gt);
+
+ if (err == 0)
+ err = measure_semaphore_response(ce);
+ if (err == 0)
+ err = measure_idle_dispatch(ce);
+ if (err == 0)
+ err = measure_busy_dispatch(ce);
+ if (err == 0)
+ err = measure_inter_request(ce);
+ if (err == 0)
+ err = measure_context_switch(ce);
+ if (err == 0)
+ err = measure_preemption(ce);
+ if (err == 0)
+ err = measure_completion(ce);
+
+ rps_unpin(engine->gt);
+ st_engine_heartbeat_enable(engine);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ if (err)
+ goto out;
+ }
+
+out:
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ cpu_latency_qos_remove_request(&qos);
+ return err;
+}
+
static int s_sync0(void *arg)
{
struct perf_series *ps = arg;
@@ -1685,9 +2492,11 @@ static int perf_series_engines(void *arg)
intel_engine_pm_get(p->engine);
if (intel_engine_supports_stats(p->engine))
- p->busy = intel_engine_get_busy_time(p->engine) + 1;
+ p->busy = intel_engine_get_busy_time(p->engine,
+ &p->time) + 1;
+ else
+ p->time = ktime_get();
p->runtime = -intel_context_get_total_runtime_ns(ce);
- p->time = ktime_get();
}
err = (*fn)(ps);
@@ -1698,13 +2507,15 @@ static int perf_series_engines(void *arg)
struct perf_stats *p = &stats[idx];
struct intel_context *ce = ps->ce[idx];
int integer, decimal;
- u64 busy, dt;
+ u64 busy, dt, now;
- p->time = ktime_sub(ktime_get(), p->time);
- if (p->busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+ if (p->busy)
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine,
+ &now),
p->busy - 1);
- }
+ else
+ now = ktime_get();
+ p->time = ktime_sub(now, p->time);
err = switch_to_kernel_sync(ce, err);
p->runtime += intel_context_get_total_runtime_ns(ce);
@@ -1764,13 +2575,14 @@ static int p_sync0(void *arg)
return err;
}
- busy = false;
if (intel_engine_supports_stats(engine)) {
- p->busy = intel_engine_get_busy_time(engine);
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
}
- p->time = ktime_get();
count = 0;
do {
struct i915_request *rq;
@@ -1793,11 +2605,15 @@ static int p_sync0(void *arg)
count++;
} while (!__igt_timeout(end_time, NULL));
- p->time = ktime_sub(ktime_get(), p->time);
if (busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
}
err = switch_to_kernel_sync(ce, err);
@@ -1830,13 +2646,14 @@ static int p_sync1(void *arg)
return err;
}
- busy = false;
if (intel_engine_supports_stats(engine)) {
- p->busy = intel_engine_get_busy_time(engine);
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
}
- p->time = ktime_get();
count = 0;
do {
struct i915_request *rq;
@@ -1861,11 +2678,15 @@ static int p_sync1(void *arg)
count++;
} while (!__igt_timeout(end_time, NULL));
i915_request_put(prev);
- p->time = ktime_sub(ktime_get(), p->time);
if (busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
}
err = switch_to_kernel_sync(ce, err);
@@ -1897,14 +2718,15 @@ static int p_many(void *arg)
return err;
}
- busy = false;
if (intel_engine_supports_stats(engine)) {
- p->busy = intel_engine_get_busy_time(engine);
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
}
count = 0;
- p->time = ktime_get();
do {
struct i915_request *rq;
@@ -1917,11 +2739,15 @@ static int p_many(void *arg)
i915_request_add(rq);
count++;
} while (!__igt_timeout(end_time, NULL));
- p->time = ktime_sub(ktime_get(), p->time);
if (busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
}
err = switch_to_kernel_sync(ce, err);
@@ -2042,6 +2868,7 @@ static int perf_parallel_engines(void *arg)
int i915_request_perf_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(perf_request_latency),
SUBTEST(perf_series_engines),
SUBTEST(perf_parallel_engines),
};
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e35ba5f9e73f..ec0ecb4e4ca6 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -134,15 +134,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- if (INTEL_GEN(rq->i915) >= 8) {
+ if (INTEL_GEN(rq->engine->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
- } else if (INTEL_GEN(rq->i915) >= 6) {
+ } else if (INTEL_GEN(rq->engine->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
- } else if (INTEL_GEN(rq->i915) >= 4) {
+ } else if (INTEL_GEN(rq->engine->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
@@ -154,11 +154,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = arbitration_command;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
- else if (IS_HASWELL(rq->i915))
+ else if (IS_HASWELL(rq->engine->i915))
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
- else if (INTEL_GEN(rq->i915) >= 6)
+ else if (INTEL_GEN(rq->engine->i915) >= 6)
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@@ -176,7 +176,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
}
flags = 0;
- if (INTEL_GEN(rq->i915) <= 5)
+ if (INTEL_GEN(rq->engine->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -221,8 +221,8 @@ bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
- 10) &&
+ 100) &&
wait_for(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
- 1000));
+ 50));
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index b2ad41c27e67..09660f5a0a4c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -9,6 +9,7 @@
#include "mock_region.h"
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .name = "mock-region",
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,