-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 12
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_dsc.c | 228
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 62
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 111
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 337
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 117
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 11
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 493
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 228
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 330
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 303
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 589
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 138
-rw-r--r--  drivers/gpu/drm/i915/intel_vdsc.c | 1088
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 999
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h | 36
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c | 44
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.h | 15
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 199
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h | 37
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 63
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 301
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 247
-rw-r--r--  fs/sysfs/file.c | 4
-rw-r--r--  include/drm/drm_dp_helper.h | 3
-rw-r--r--  include/drm/drm_dsc.h | 485
-rw-r--r--  include/linux/sysfs.h | 8
61 files changed, 5364 insertions(+), 1972 deletions(-)
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 4b4dc236ef6f..b422eb8edf16 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -232,6 +232,18 @@ MIPI DSI Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
:export:
+Display Stream Compression Helper Functions Reference
+=====================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+ :doc: dsc helpers
+
+.. kernel-doc:: include/drm/drm_dsc.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+ :export:
+
Output Probing Helper Functions Reference
=========================================
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1fafc2f8e8f9..e490fe2687db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -32,7 +32,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 6d483487f2b4..2d6c491a0542 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1428,17 +1428,19 @@ u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
}
EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
-u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3])
{
+ int num_bpc = 0;
u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
if (color_depth & DP_DSC_12_BPC)
- return 12;
+ dsc_bpc[num_bpc++] = 12;
if (color_depth & DP_DSC_10_BPC)
- return 10;
+ dsc_bpc[num_bpc++] = 10;
if (color_depth & DP_DSC_8_BPC)
- return 8;
+ dsc_bpc[num_bpc++] = 8;
- return 0;
+ return num_bpc;
}
-EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
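With this change the helper reports every input bpc the DSC sink can decode, filled deepest first (12, 10, 8), instead of only the maximum, so a source driver can pick the deepest bpc that still fits its pipe budget. A minimal caller sketch under that assumption — pick_dsc_input_bpc() and the pipe_bpp budget are illustrative, not part of the patch:

	static u8 pick_dsc_input_bpc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
				     int pipe_bpp)
	{
		u8 dsc_bpc[3];
		int num_bpc, i;

		num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
		for (i = 0; i < num_bpc; i++) {
			/* Entries arrive deepest first: 12, then 10, then 8 */
			if (dsc_bpc[i] * 3 <= pipe_bpp)
				return dsc_bpc[i];
		}

		return 0; /* no supported input bpc fits the budget */
	}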
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 000000000000..bc2b23adb072
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with the VESA
+ * Display Stream Compression standard, as required for DSC on DisplayPort/eDP
+ * or MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+ memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+ pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+ pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg)
+{
+ int i;
+
+ /* Protect against someone accidentally changing struct size */
+ BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+ memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+ /* PPS 0 */
+ pps_sdp->pps_payload.dsc_version =
+ dsc_cfg->dsc_version_minor |
+ dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+ /* PPS 1, 2 is 0 */
+
+ /* PPS 3 */
+ pps_sdp->pps_payload.pps_3 =
+ dsc_cfg->line_buf_depth |
+ dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+ /* PPS 4 */
+ pps_sdp->pps_payload.pps_4 =
+ ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT) |
+ dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+ dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+ dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+ /* PPS 5 */
+ pps_sdp->pps_payload.bits_per_pixel_low =
+ (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+ /*
+ * The DSC panel expects the PPS packet to have big endian format
+ * for data spanning 2 bytes. Use the cpu_to_be16() macro to convert:
+ * on a little endian CPU it swaps the bytes, on a big endian CPU it
+ * leaves the value unchanged.
+ */
+
+ /* PPS 6, 7 */
+ pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+ /* PPS 8, 9 */
+ pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+ /* PPS 10, 11 */
+ pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+ /* PPS 12, 13 */
+ pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+ /* PPS 14, 15 */
+ pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+ /* PPS 16 */
+ pps_sdp->pps_payload.initial_xmit_delay_high =
+ ((dsc_cfg->initial_xmit_delay &
+ DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 17 */
+ pps_sdp->pps_payload.initial_xmit_delay_low =
+ (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+ /* PPS 18, 19 */
+ pps_sdp->pps_payload.initial_dec_delay =
+ cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+ /* PPS 20 is 0 */
+
+ /* PPS 21 */
+ pps_sdp->pps_payload.initial_scale_value =
+ dsc_cfg->initial_scale_value;
+
+ /* PPS 22, 23 */
+ pps_sdp->pps_payload.scale_increment_interval =
+ cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+ /* PPS 24 */
+ pps_sdp->pps_payload.scale_decrement_interval_high =
+ ((dsc_cfg->scale_decrement_interval &
+ DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 25 */
+ pps_sdp->pps_payload.scale_decrement_interval_low =
+ (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+ /* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+ /* PPS 27 */
+ pps_sdp->pps_payload.first_line_bpg_offset =
+ dsc_cfg->first_line_bpg_offset;
+
+ /* PPS 28, 29 */
+ pps_sdp->pps_payload.nfl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+ /* PPS 30, 31 */
+ pps_sdp->pps_payload.slice_bpg_offset =
+ cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+ /* PPS 32, 33 */
+ pps_sdp->pps_payload.initial_offset =
+ cpu_to_be16(dsc_cfg->initial_offset);
+
+ /* PPS 34, 35 */
+ pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+ /* PPS 36 */
+ pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+ /* PPS 37 */
+ pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+ /* PPS 38, 39 */
+ pps_sdp->pps_payload.rc_model_size =
+ cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+ /* PPS 40 */
+ pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+ /* PPS 41 */
+ pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ dsc_cfg->rc_quant_incr_limit0;
+
+ /* PPS 42 */
+ pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ dsc_cfg->rc_quant_incr_limit1;
+
+ /* PPS 43 */
+ pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+ /* PPS 44 - 57 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+ pps_sdp->pps_payload.rc_buf_thresh[i] =
+ dsc_cfg->rc_buf_thresh[i];
+
+ /* PPS 58 - 87 */
+ /*
+ * For DSC sink programming the RC Range parameter fields
+ * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+ */
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ ((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ }
+
+ /* PPS 88 */
+ pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+ /* PPS 89 */
+ pps_sdp->pps_payload.second_line_bpg_offset =
+ dsc_cfg->second_line_bpg_offset;
+
+ /* PPS 90, 91 */
+ pps_sdp->pps_payload.nsl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+ /* PPS 92, 93 */
+ pps_sdp->pps_payload.second_line_offset_adj =
+ cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+ /* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
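Together the two exported helpers give a source driver a two-step recipe: initialize the DP 1.4 SDP header, then pack the driver-computed struct drm_dsc_config into the big endian PPS payload. A usage sketch, where send_dsc_pps_sdp() is a hypothetical driver-specific transmit hook:

	static void dsc_send_pps(const struct drm_dsc_config *dsc_cfg)
	{
		struct drm_dsc_pps_infoframe pps_sdp;

		/* SDP header: HB1 = DP_SDP_PPS, HB2 = payload size - 1 */
		drm_dsc_dp_pps_header_init(&pps_sdp);

		/* Pack the payload; multi-byte fields become big endian */
		drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);

		send_dsc_pps_sdp(&pps_sdp, sizeof(pps_sdp)); /* hypothetical */
	}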
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ff878c994e2..19b5fe5016bf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -157,14 +157,17 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o \
vlv_dsi.o \
- vlv_dsi_pll.o
+ vlv_dsi_pll.o \
+ intel_vdsc.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
- selftests/igt_flush_test.o
+ selftests/igt_flush_test.o \
+ selftests/igt_reset.o \
+ selftests/igt_spinner.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7f455bca528e..38dcee1ca062 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error = file->private_data;
- struct drm_i915_error_state_buf str;
+ struct i915_gpu_state *error;
ssize_t ret;
- loff_t tmp;
+ void *buf;
+ error = file->private_data;
if (!error)
return 0;
- ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
- if (ret)
- return ret;
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- ret = i915_error_state_to_str(&str, error);
- if (ret)
+ ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
goto out;
- tmp = 0;
- ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
- if (ret < 0)
- goto out;
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
- *pos = str.start + ret;
out:
- i915_error_state_buf_release(&str);
+ kfree(buf);
return ret;
}
@@ -3375,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
- int i;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
- seq_printf(m, "Workarounds applied: %d\n", wa->count);
- for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "Workarounds applied: %u\n", wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+ i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
return 0;
}
@@ -3441,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
- enum pipe pipe;
- int plane;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
- ddb = &dev_priv->wm.skl_hw.ddb;
-
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_universal_plane(dev_priv, pipe, plane) {
- entry = &ddb->plane[pipe][plane];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &ddb->plane[pipe][PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -4592,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ synchronize_irq(dev_priv->drm.irq);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_work(&dev_priv->hotplug.hotplug_work);
+
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
yesno(delayed_work_pending(&hotplug->reenable_work)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b1d23c73c147..b310a897a4ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
+#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
* Use PCH_NOP (PCH but no South Display) for PCH platforms without
* display.
*/
- if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (pch && !HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
@@ -645,7 +646,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
ret = drm_vblank_init(&dev_priv->drm,
INTEL_INFO(dev_priv)->num_pipes);
if (ret)
@@ -696,7 +697,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_overlay_setup(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
ret = intel_fbdev_init(dev);
@@ -868,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+ pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
if (pre) {
DRM_ERROR("This is a pre-production stepping. "
@@ -1383,6 +1385,20 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
}
}
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ i915_report_error(dev_priv,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
intel_sanitize_options(dev_priv);
i915_perf_init(dev_priv);
@@ -1452,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1551,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
@@ -1575,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (INTEL_INFO(dev_priv)->num_pipes)
+ if (HAS_DISPLAY(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4064e49dbf70..b1c31967194b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,6 +53,7 @@
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -68,6 +69,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -88,8 +90,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20181122"
-#define DRIVER_TIMESTAMP 1542898187
+#define DRIVER_DATE "20181204"
+#define DRIVER_TIMESTAMP 1543944377
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -494,6 +496,7 @@ struct i915_psr {
bool sink_support;
bool prepared, enabled;
struct intel_dp *dp;
+ enum pipe pipe;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -504,6 +507,8 @@ struct i915_psr {
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
};
enum intel_pch {
@@ -1093,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- /* packed/y */
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
- struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
@@ -1188,20 +1190,6 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
-struct i915_wa_reg {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
- struct i915_wa_reg reg[I915_MAX_WA_REGS];
- u32 count;
-};
-
struct i915_virtual_gpu {
bool active;
u32 caps;
@@ -1651,7 +1639,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -1995,6 +1983,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -2448,9 +2438,9 @@ intel_info(const struct drm_i915_private *dev_priv)
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.overlay_needs_physical)
+ ((dev_priv)->info.display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2471,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
@@ -2556,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
@@ -2568,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
#include "i915_trace.h"
static inline bool intel_vtd_active(void)
@@ -3340,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3720,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c55b1f75c980..d36a9755ad91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3309,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct i915_request *request)
{
- GEM_TRACE("%s fence %llx:%d -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
unsigned long flags;
GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3354,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
i915_gem_reset_prepare_engine(engine);
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
intel_gpu_reset(i915, ALL_ENGINES);
- /*
- * Make sure no one is running the old callback before we proceed with
- * cancelling requests and resetting the completion tracking. Otherwise
- * we might submit a request to the hardware which never completes.
- */
- synchronize_rcu();
-
for_each_engine(engine, i915, id) {
- /* Mark all executing requests as skipped */
- engine->cancel_requests(engine);
-
- /*
- * Only once we've force-cancelled all in-flight requests can we
- * start to complete all requests.
- */
- engine->submit_request = nop_complete_submit_request;
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
- * in nop_complete_submit_request.
+ * in nop_submit_request.
*/
synchronize_rcu();
- for_each_engine(engine, i915, id) {
- unsigned long flags;
-
- /*
- * Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+ for_each_engine(engine, i915, id) {
i915_gem_reset_finish_engine(engine);
+ intel_engine_wakeup(engine);
}
out:
@@ -5334,7 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- intel_gt_workarounds_apply(dev_priv);
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(dev_priv);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(dev_priv, "init");
i915_gem_init_swizzling(dev_priv);
@@ -5529,6 +5498,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -5575,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5653,6 +5667,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5704,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b97963db0287..371c07087095 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int ret;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- ret = intel_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
init_contexts(dev_priv);
/* lowest priority; idle task */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8123bf0e4807..07465123c166 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
*
*/
-#include <linux/utsname.h>
+#include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
#include <linux/stop_machine.h>
+#include <linux/utsname.h>
#include <linux/zlib.h>
+
#include <drm/drm_print.h>
-#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+ void *addr, unsigned int len, loff_t it)
{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
+ sg->page_link = (unsigned long)virt_to_page(addr);
+ sg->offset = offset_in_page(addr);
+ sg->length = len;
+ sg->dma_address = it;
}
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
- if (e->pos + len <= e->start) {
- e->pos += len;
+ if (!len)
return false;
- }
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
+ if (e->bytes + len + 1 <= e->size)
+ return true;
- return true;
-}
+ if (e->bytes) {
+ __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+ e->iter += e->bytes;
+ e->buf = NULL;
+ e->bytes = 0;
+ }
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
+ if (e->cur == e->end) {
+ struct scatterlist *sgl;
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
+ sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ if (!sgl) {
+ e->err = -ENOMEM;
+ return false;
+ }
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
+ if (e->cur) {
+ e->cur->offset = 0;
+ e->cur->length = 0;
+ e->cur->page_link =
+ (unsigned long)sgl | SG_CHAIN;
+ } else {
+ e->sgl = sgl;
}
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
+ e->cur = sgl;
+ e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
}
- e->bytes += len;
- e->pos += len;
+ e->size = ALIGN(len + 1, SZ_64K);
+ e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!e->buf) {
+ e->size = PAGE_ALIGN(len + 1);
+ e->buf = kmalloc(e->size, GFP_KERNEL);
+ }
+ if (!e->buf) {
+ e->err = -ENOMEM;
+ return false;
+ }
+
+ return true;
}
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
+ const char *fmt, va_list args)
{
- unsigned len;
+ va_list ap;
+ int len;
- if (!__i915_error_ok(e))
+ if (e->err)
return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- va_list tmp;
-
- va_copy(tmp, args);
- len = vsnprintf(NULL, 0, f, tmp);
- va_end(tmp);
-
- if (!__i915_error_seek(e, len))
- return;
+ va_copy(ap, args);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+ if (len <= 0) {
+ e->err = len;
+ return;
}
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ if (!__i915_error_grow(e, len))
+ return;
- __i915_error_advance(e, len);
+ GEM_BUG_ON(e->bytes >= e->size);
+ len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+ if (len < 0) {
+ e->err = len;
+ return;
+ }
+ e->bytes += len;
}
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
unsigned len;
- if (!__i915_error_ok(e))
+ if (e->err || !str)
return;
len = strlen(str);
+ if (!__i915_error_grow(e, len))
+ return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ GEM_BUG_ON(e->bytes + len > e->size);
memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
+ e->bytes += len;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -635,21 +638,29 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
- const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
{
- struct drm_i915_private *dev_priv = m->i915;
- struct drm_i915_error_object *obj;
- struct timespec64 ts;
- int i, j;
+ while (sgl) {
+ struct scatterlist *sg;
- if (!error) {
- err_printf(m, "No error state collected\n");
- return 0;
+ for (sg = sgl; !sg_is_chain(sg); sg++) {
+ kfree(sg_virt(sg));
+ if (sg_is_last(sg))
+ break;
+ }
+
+ sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+ free_page((unsigned long)sgl);
+ sgl = sg;
}
+}
- if (IS_ERR(error))
- return PTR_ERR(error);
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+ struct i915_gpu_state *error)
+{
+ struct drm_i915_error_object *obj;
+ struct timespec64 ts;
+ int i, j;
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
@@ -683,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
- err_print_pciid(m, error->i915);
+ err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev_priv)) {
- struct intel_csr *csr = &dev_priv->csr;
+ if (HAS_CSR(m->i915)) {
+ struct intel_csr *csr = &m->i915->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
@@ -708,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
- err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+ err_printf(m, "Missed interrupts: 0x%08lx\n",
+ m->i915->gpu_error.missed_irq_rings);
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (INTEL_GEN(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN7(m->i915))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -745,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j]->name);
+ m->i915->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -763,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i]->name);
+ err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
ee->context.comm,
@@ -775,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, dev_priv->engine[i], NULL, obj);
+ print_error_obj(m, m->i915->engine[i], NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -794,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i]->name);
+ m->i915->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -807,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"ringbuffer", ee->ringbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW Status", ee->hws_page);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW context", ee->ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA context", ee->wa_ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"NULL context", ee->default_state);
}
@@ -835,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
+}
+
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+ struct drm_i915_error_state_buf m;
+
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ if (READ_ONCE(error->sgl))
+ return 0;
+
+ memset(&m, 0, sizeof(m));
+ m.i915 = error->i915;
+
+ __err_print_to_sgl(&m, error);
+
+ if (m.buf) {
+ __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+ m.bytes = 0;
+ m.buf = NULL;
+ }
+ if (m.cur) {
+ GEM_BUG_ON(m.end < m.cur);
+ sg_mark_end(m.cur - 1);
+ }
+ GEM_BUG_ON(m.sgl && !m.cur);
+
+ if (m.err) {
+ err_free_sgl(m.sgl);
+ return m.err;
+ }
- if (m->bytes == 0 && m->err)
- return m->err;
+ if (cmpxchg(&error->sgl, NULL, m.sgl))
+ err_free_sgl(m.sgl);
return 0;
}
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
- struct drm_i915_private *i915,
- size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t off, size_t rem)
{
- memset(ebuf, 0, sizeof(*ebuf));
- ebuf->i915 = i915;
+ struct scatterlist *sg;
+ size_t count;
+ loff_t pos;
+ int err;
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size,
- GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ if (!error || !rem)
+ return 0;
- if (ebuf->buf == NULL) {
- ebuf->size = PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ err = err_print_to_sgl(error);
+ if (err)
+ return err;
- if (ebuf->buf == NULL) {
- ebuf->size = 128;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ sg = READ_ONCE(error->fit);
+ if (!sg || off < sg->dma_address)
+ sg = error->sgl;
+ if (!sg)
+ return 0;
- if (ebuf->buf == NULL)
- return -ENOMEM;
+ pos = sg->dma_address;
+ count = 0;
+ do {
+ size_t len, start;
+
+ if (sg_is_chain(sg)) {
+ sg = sg_chain_ptr(sg);
+ GEM_BUG_ON(sg_is_chain(sg));
+ }
+
+ len = sg->length;
+ if (pos + len <= off) {
+ pos += len;
+ continue;
+ }
- ebuf->start = pos;
+ start = sg->offset;
+ if (pos < off) {
+ GEM_BUG_ON(off - pos > len);
+ len -= off - pos;
+ start += off - pos;
+ pos = off;
+ }
- return 0;
+ len = min(len, rem);
+ GEM_BUG_ON(!len || len > sg->length);
+
+ memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+ count += len;
+ pos += len;
+
+ buf += len;
+ rem -= len;
+ if (!rem) {
+ WRITE_ONCE(error->fit, sg);
+ break;
+ }
+ } while (!sg_is_last(sg++));
+
+ return count;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -944,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
cleanup_params(error);
cleanup_uc_state(error);
+ err_free_sgl(error->sgl);
kfree(error);
}
@@ -1494,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
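The error state is now rendered once into a chain of kmalloc'ed buffers tracked by a scatterlist, and i915_gpu_state_copy_to_buffer() serves arbitrary (offset, count) windows out of it, with error->fit caching the last entry touched so sequential reads resume without rewalking the chain. A reader sketch under those assumptions, draining the state in fixed-size chunks the way the debugfs and sysfs callers do:

	static ssize_t drain_gpu_state(struct i915_gpu_state *error,
				       char *out, size_t chunk)
	{
		loff_t pos = 0;
		ssize_t ret;

		do {
			ret = i915_gpu_state_copy_to_buffer(error, out + pos,
							    pos, chunk);
			if (ret <= 0)
				break;
			pos += ret;
		} while ((size_t)ret == chunk);

		return ret < 0 ? ret : pos;
	}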
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 3ec89a504de5..ff2652bbb0b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+ struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
struct drm_i915_error_state_buf {
struct drm_i915_private *i915;
- unsigned int bytes;
- unsigned int size;
+ struct scatterlist *sgl, *cur, *end;
+
+ char *buf;
+ size_t bytes;
+ size_t size;
+ loff_t iter;
+
int err;
- u8 *buf;
- loff_t start;
- loff_t pos;
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
- struct drm_i915_private *i915,
- size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
- kfree(eb->buf);
-}
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
return gpu;
}
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t offset, size_t count);
+
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1b81d7cb209e..6350db5503cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -79,8 +79,9 @@
#define GEN2_FEATURES \
GEN(2), \
.num_pipes = 1, \
- .has_overlay = 1, .overlay_needs_physical = 1, \
- .has_gmch_display = 1, \
+ .display.has_overlay = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch_display = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -93,7 +94,8 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_mobile = 1,
+ .display.cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
@@ -107,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
PLATFORM(INTEL_I85X),
.is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
- .cursor_needs_physical = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
@@ -119,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.num_pipes = 2, \
- .has_gmch_display = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -131,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -141,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -152,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -162,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -173,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
#define GEN4_FEATURES \
GEN(4), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_gmch_display = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -200,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
- .has_overlay = 1,
+ .display.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -208,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
- .is_mobile = 1, .has_fbc = 1,
- .has_overlay = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.has_overlay = 1,
+ .display.supports_tv = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -224,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
- .is_mobile = 1, .has_fbc = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
GEN(5), \
.num_pipes = 2, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -250,14 +260,15 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
- .is_mobile = 1, .has_fbc = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
};
#define GEN6_FEATURES \
GEN(6), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -301,8 +312,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.num_pipes = 3, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -359,8 +370,8 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmch_display = 1,
- .has_hotplug = 1,
+ .display.has_gmch_display = 1,
+ .display.has_hotplug = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -374,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_psr = 1, \
- .has_dp_mst = 1, \
+ .display.has_psr = 1, \
+ .display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
.has_runtime_pm = 1
@@ -444,14 +455,14 @@ static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.num_pipes = 3,
- .has_hotplug = 1,
+ .display.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
- .has_gmch_display = 1,
+ .display.has_gmch_display = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
@@ -473,15 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_guc = 1, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
.ddb_size = 896
#define SKL_PLATFORM \
GEN9_FEATURES, \
/* Display WA #0477 WaDisableIPC: skl */ \
- .has_ipc = 0, \
+ .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -512,19 +523,19 @@ static const struct intel_device_info intel_skylake_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_fbc = 1, \
- .has_psr = 1, \
+ .display.has_fbc = 1, \
+ .display.has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_rc6 = 1, \
- .has_dp_mst = 1, \
+ .display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
@@ -532,7 +543,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
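Every display-only feature flag moves under a dedicated display substruct of intel_device_info, so the device tables read as .display.has_foo and the HAS_*() macros in i915_drv.h follow suit. The real definition lives in intel_device_info.h (touched by this patch but not shown in this excerpt); a sketch of the layout the new initializers imply, with the field list assumed rather than copied from the header:

	struct intel_device_info {
		/* ... GT/core feature flags remain at the top level ... */
		struct {
			u8 cursor_needs_physical:1;
			u8 has_csr:1;
			u8 has_ddi:1;
			u8 has_dp_mst:1;
			u8 has_fbc:1;
			u8 has_gmch_display:1;
			u8 has_hotplug:1;
			u8 has_ipc:1;
			u8 has_overlay:1;
			u8 has_psr:1;
			u8 overlay_needs_physical:1;
			u8 supports_tv:1;
		} display;
		/* ... */
	};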
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 47baf2fe8f71..0a7d60509ca7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4570,6 +4570,7 @@ enum {
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -4617,6 +4618,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON (1 << 31)
+
+#define _PP_CONTROL_1 0xc7204
+#define _PP_CONTROL_2 0xc7304
+#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+ _PP_CONTROL_2)
+#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
+#define POWER_CYCLE_DELAY_SHIFT 4
+#define VDD_OVERRIDE_FORCE (1 << 3)
+#define BACKLIGHT_ENABLE (1 << 2)
+#define PWR_DOWN_ON_RESET (1 << 1)
+#define PWR_STATE_TARGET (1 << 0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -7750,6 +7762,7 @@ enum {
#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
@@ -9197,6 +9210,7 @@ enum skl_power_gate {
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
@@ -9215,6 +9229,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 71107540581d..ca95ab2f4cfa 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -136,6 +136,9 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
intel_engine_get_seqno(engine),
seqno);
+ if (seqno == engine->timeline.seqno)
+ continue;
+
kthread_park(engine->breadcrumbs.signaler);
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6dbeed079ae5..fc2eeab823b7 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
/*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4dadfc6..0e055ea0179f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
/*
+ * SPDX-License-Identifier: MIT
+ *
* i915_sw_fence.h - library routines for N:M synchronisation points
*
* Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6bb2b05..535caebd9813 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_i915_error_state_buf error_str;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_state *gpu;
ssize_t ret;
- ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
- if (ret)
- return ret;
-
- gpu = i915_first_error_state(dev_priv);
- ret = i915_error_state_to_str(&error_str, gpu);
- if (ret)
- goto out;
-
- ret = count < error_str.bytes ? count : error_str.bytes;
- memcpy(buf, error_str.buf, ret);
+ gpu = i915_first_error_state(i915);
+ if (gpu) {
+ ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_state_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
-out:
- i915_gpu_state_put(gpu);
- i915_error_state_buf_release(&error_str);
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
return ret;
}
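
The fallback branch above follows the sysfs bin_attribute contract: the read handler is called repeatedly with an advancing offset and must return how many bytes it copied from that offset, terminating the read by returning 0. A minimal userspace sketch of the same math (the end-of-string guard, which the kernel side gets from the VFS, is made explicit here):

#include <stdio.h>
#include <string.h>

/* Copy at most `count` bytes of the message starting at `off`;
 * return 0 once the whole string has been consumed.
 */
static size_t read_chunk(char *buf, size_t off, size_t count)
{
	const char *str = "No error state collected\n";
	size_t len = strlen(str);
	size_t ret;

	if (off >= len)
		return 0;

	ret = count < len - off ? count : len - off;
	memcpy(buf, str + off, ret);
	return ret;
}

int main(void)
{
	char buf[8];
	size_t off = 0, n;

	while ((n = read_chunk(buf, off, sizeof(buf))) > 0) {
		fwrite(buf, 1, n, stdout);
		off += n;
	}
	return 0;
}
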
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5858a43e19da..9726df37c4c4 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
__stringify(x), (long)(x))
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
- __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+ __builtin_add_overflow_p((A), (B), (T)0)
#else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
- a + b < a; \
+ (T)(a + b) < a; \
})
#endif
+#define add_overflows(A, B) \
+ add_overflows_t(typeof((A) + (B)), (A), (B))
+
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
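
The point of the new _t variant is that the wraparound check can be done in a narrower type than the promoted type of A + B; intel_fb_offset_to_xy() later in this diff uses it to catch u32 overflow of a u64 product. A standalone sketch of the fallback expansion (userspace GNU C, since the macro relies on typeof and statement expressions):

#include <stdint.h>
#include <stdio.h>

/* Fallback add_overflows_t(T, A, B): the sum wraps in type T exactly
 * when its truncation compares below one of the operands.
 */
#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})

int main(void)
{
	uint64_t product = (uint64_t)0x10000 * 0x10000; /* 2^32 */

	/* 1: 2^32 + 0 does not fit in a u32 */
	printf("%d\n", add_overflows_t(uint32_t, product, (uint32_t)0));
	/* 0: small sums survive the truncation */
	printf("%d\n", add_overflows_t(uint32_t, (uint32_t)100, (uint32_t)50));
	return 0;
}
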
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 01f422df8c23..4dd793b78996 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -26,6 +26,7 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
#include "intel_dsi.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
@@ -107,6 +108,90 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
}
}
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+ u32 len)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ int free_credits;
+ int i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 tmp = 0;
+
+ free_credits = payload_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("Payload credit not available\n");
+ return false;
+ }
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ tmp |= *data++ << 8 * j;
+
+ I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ }
+
+ return true;
+}
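
The packing loop above emits the payload little-endian, four bytes per TXPYLD write, with a short final word for the tail. A userspace sketch of just the byte packing (register writes and credit checks stubbed out):

#include <stdint.h>
#include <stdio.h>

static void pack_payload(const uint8_t *data, uint32_t len)
{
	uint32_t i, j;

	for (i = 0; i < len; i += 4) {
		uint32_t tmp = 0, n = len - i < 4 ? len - i : 4;

		for (j = 0; j < n; j++)
			tmp |= (uint32_t)*data++ << 8 * j;

		printf("TXPYLD <- 0x%08x\n", tmp); /* stands in for I915_WRITE() */
	}
}

int main(void)
{
	static const uint8_t payload[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	pack_payload(payload, sizeof(payload)); /* 0x44332211, 0x00006655 */
	return 0;
}
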
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ u32 tmp;
+ int free_credits;
+
+ /* check if header credit available */
+ free_credits = header_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ return -1;
+ }
+
+ tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+ if (pkt.payload)
+ tmp |= PAYLOAD_PRESENT;
+ else
+ tmp &= ~PAYLOAD_PRESENT;
+
+ tmp &= ~VBLANK_FENCE;
+
+ if (enable_lpdt)
+ tmp |= LP_DATA_TRANSFER;
+
+ tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+ tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+ return 0;
+}
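
For reference, the header bytes the masks above pick apart are laid out by mipi_dsi_create_packet() per the DSI spec: byte 0 carries the virtual channel in bits 7:6 and the data type in bits 5:0, bytes 1-2 the word count for long packets (or the two parameters for short ones). A sketch of that layout only; the TXHDR register shifts are hardware-specific and not modelled here:

#include <stdint.h>
#include <stdio.h>

static void build_header(uint8_t hdr[3], uint8_t vc, uint8_t type,
			 uint16_t wc_or_params)
{
	hdr[0] = ((vc & 0x3) << 6) | (type & 0x3f);
	hdr[1] = wc_or_params & 0xff;	/* WC low / parameter 0 */
	hdr[2] = wc_or_params >> 8;	/* WC high / parameter 1 */
}

int main(void)
{
	uint8_t hdr[3];

	/* generic long write (0x29), VC 0, 16-byte payload */
	build_header(hdr, 0, 0x29, 16);
	printf("%02x %02x %02x\n", hdr[0], hdr[1], hdr[2]); /* 29 10 00 */
	return 0;
}
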
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt)
+{
+ /* payload queue can accept 256 bytes at most, check limit */
+ if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+ DRM_ERROR("payload size exceeds max queue limit\n");
+ return -1;
+ }
+
+ /* load data into command payload queue */
+ if (!add_payld_to_queue(host, pkt.payload,
+ pkt.payload_length)) {
+ DRM_ERROR("adding payload to queue failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -172,6 +257,45 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
}
}
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dss_ctl1;
+
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 |= SPLITTER_ENABLE;
+ dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ u32 dss_ctl2;
+ u16 hactive = adjusted_mode->crtc_hdisplay;
+ u16 dl_buffer_depth;
+
+ dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
+ dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+ if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+ DRM_ERROR("DL buffer depth exceed max value\n");
+
+ dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ I915_WRITE(DSS_CTL2, dss_ctl2);
+ } else {
+ /* Interleave */
+ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ }
+
+ I915_WRITE(DSS_CTL1, dss_ctl1);
+}
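
In front-back mode each link carries half the horizontal active pixels plus the configured overlap, which is the per-link buffer depth programmed into DSS_CTL1/2 above. For an illustrative 1440-wide dual-link panel with a 20-pixel overlap:

#include <stdio.h>

int main(void)
{
	unsigned int hactive = 1440, pixel_overlap = 20;

	/* dl_buffer_depth as computed above */
	printf("%u\n", hactive / 2 + pixel_overlap); /* 740 */
	return 0;
}
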
+
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -412,6 +536,62 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
}
}
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
static void
gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
@@ -506,7 +686,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
- //TODO: configure DSS_CTL1
+ /* configure stream splitting */
+ configure_dual_link_mode(encoder, pipe_config);
}
for_each_dsi_port(port, intel_dsi->ports) {
@@ -758,6 +939,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+ /* Step 4l: Gate DDI clocks */
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -799,18 +983,25 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
wait_for_cmds_dispatched_to_panel(encoder);
}
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step3b */
+ gen11_dsi_map_pll(encoder, pipe_config);
/* step4: enable DSI port and DPHY */
gen11_dsi_enable_port_and_phy(encoder, pipe_config);
@@ -912,6 +1103,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
u32 tmp;
enum port port;
+ gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DDI_BUF_CTL(port));
tmp &= ~DDI_BUF_CTL_ENABLE;
@@ -923,6 +1115,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
DRM_ERROR("DDI port:%c buffer not idle\n",
port_name(port));
}
+ gen11_dsi_ungate_clocks(encoder);
}
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -945,10 +1138,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
}
-static void __attribute__((unused)) gen11_dsi_disable(
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -972,10 +1164,289 @@ static void __attribute__((unused)) gen11_dsi_disable(
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 pll_id;
+
+ /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode =
+ intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ adjusted_mode->flags = 0;
+
+ /* Dual link goes to transcoder DSI '0' */
+ if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+ pipe_config->clock_set = true;
+ pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u64 domains = 0;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports)
+ if (port == PORT_A)
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+ else
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+ return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+ bool ret = false;
+
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
+ return false;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ default:
+ DRM_ERROR("Invalid PIPE input\n");
+ goto out;
+ }
+
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ ret = tmp & PIPECONF_ENABLE;
+ }
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain);
+ return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+ .destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct mipi_dsi_packet dsi_pkt;
+ ssize_t ret;
+ bool enable_lpdt = false;
+
+ ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ enable_lpdt = true;
+
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
+ /* only long packet contains payload */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* TODO: add payload receive code if needed */
+
+ ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+ return ret;
+}
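
For context, these host ops are what a panel driver's writes funnel into once it attaches. A sketch using the standard helpers (the init sequence bytes are made up; device setup is omitted):

#include <linux/types.h>
#include <drm/drm_mipi_dsi.h>

static int example_panel_init(struct mipi_dsi_device *dsi)
{
	/* >2 bytes forces a generic *long* write: header + payload path */
	static const u8 seq[] = { 0xb0, 0x04, 0x00, 0x00 };
	ssize_t ret;

	ret = mipi_dsi_generic_write(dsi, seq, sizeof(seq));
	if (ret < 0)
		return ret;

	/* short DCS write: exercises the header-only path */
	return mipi_dsi_dcs_set_display_on(dsi);
}
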
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ .attach = gen11_dsi_host_attach,
+ .detach = gen11_dsi_host_detach,
+ .transfer = gen11_dsi_host_transfer,
+};
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = intel_connector;
+ connector = &intel_connector->base;
+
+ /* register DSI encoder with DRM subsystem */
+ drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+ encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+ encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->disable = gen11_dsi_disable;
+ encoder->port = port;
+ encoder->get_config = gen11_dsi_get_config;
+ encoder->compute_config = gen11_dsi_compute_config;
+ encoder->get_hw_state = gen11_dsi_get_hw_state;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->cloneable = 0;
+ encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+ /* register DSI connector with DRM subsystem */
+ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
+ /* fill mode info from VBT */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_dsi_vbt_get_modes(intel_dsi);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!fixed_mode) {
+ DRM_ERROR("DSI fixed mode info missing\n");
+ goto err;
+ }
+
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ drm_encoder_cleanup(&encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index a5a2c8fe58a7..8cb02f28d30c 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
+ crtc_state->update_planes = 0;
return &crtc_state->base;
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 905f8ef3ba4f..0a73e6e65c20 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -139,6 +139,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
crtc_state->nv12_planes |= BIT(intel_plane->id);
+ if (state->visible || old_plane_state->base.visible)
+ crtc_state->update_planes |= BIT(intel_plane->id);
+
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
old_plane_state,
@@ -168,27 +171,75 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
to_intel_plane_state(new_plane_state));
}
-void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+ unsigned int *update_mask)
{
- struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
struct intel_plane *plane;
- u32 update_mask;
int i;
- update_mask = old_crtc_state->active_planes;
- update_mask |= new_crtc_state->active_planes;
+ if (*update_mask == 0)
+ return NULL;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ enum plane_id plane_id = plane->id;
- for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
if (crtc->pipe != plane->pipe ||
- !(update_mask & BIT(plane->id)))
+ !(*update_mask & BIT(plane_id)))
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ entries_y,
+ I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ entries_uv,
+ I915_MAX_PLANES, plane_id))
continue;
+ *update_mask &= ~BIT(plane_id);
+ entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+ entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ return plane;
+ }
+
+ /* should never happen */
+ WARN_ON(1);
+
+ return NULL;
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane *plane;
+
+ memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+ memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+ while ((plane = skl_next_plane_to_commit(state, crtc,
+ entries_y, entries_uv,
+ &update_mask))) {
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+
if (new_plane_state->base.visible) {
trace_intel_update_plane(&plane->base, crtc);
-
plane->update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
@@ -204,15 +255,38 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
* plane_state.
*/
new_plane_state =
- intel_atomic_get_new_plane_state(old_state, master);
+ intel_atomic_get_new_plane_state(state, master);
trace_intel_update_plane(&plane->base, crtc);
-
plane->update_slave(plane, new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
+ }
+}
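
The loop above solves an ordering problem: a plane's new DDB allocation may only be latched once no other plane still occupies those blocks, so planes are flushed one at a time, each picked only when its target range is clear of everything still live. A toy model of the same selection rule (illustrative names, a single DDB instead of the Y/UV pair):

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned int start, end; };

static bool overlaps(struct ddb_entry a, struct ddb_entry b)
{
	return a.start < b.end && b.start < a.end;
}

/* Pick a pending plane whose new range is clear of every entry still
 * live for the other planes, claim it, and return it.
 */
static int next_plane(struct ddb_entry live[], const struct ddb_entry want[],
		      unsigned int *mask, int nplanes)
{
	int p, o;

	for (p = 0; p < nplanes; p++) {
		bool blocked = false;

		if (!(*mask & (1u << p)))
			continue;

		for (o = 0; o < nplanes; o++)
			if (o != p && overlaps(want[p], live[o]))
				blocked = true;

		if (blocked)
			continue;

		*mask &= ~(1u << p);
		live[p] = want[p];
		return p;
	}
	return -1; /* "should never happen" for a valid allocation */
}

int main(void)
{
	struct ddb_entry live[2] = { { 0, 100 }, { 100, 200 } };
	const struct ddb_entry want[2] = { { 0, 150 }, { 150, 200 } };
	unsigned int mask = 0x3;
	int p;

	/* plane 1 must move out of the way first: prints 1, then 0 */
	while ((p = next_plane(live, want, &mask, 2)) >= 0)
		printf("commit plane %d\n", p);
	return 0;
}
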
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
- plane->disable_plane(plane, crtc);
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0694aa8bb9bc..6d3e0260d49c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1752,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (!HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d827136..447c5256f63a 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
#include "i915_drv.h"
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ad11540ac436..f3e1d6a0b7dd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_dsi.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -2154,6 +2155,12 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
intel_port_is_tc(dev_priv, encoder->port))
domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+ /*
+ * VDSC power is needed when DSC is enabled
+ */
+ if (crtc_state->dsc_params.compression_enable)
+ domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
+
return domains;
}
@@ -2785,77 +2792,54 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
return 0;
}
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- int i;
-
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- enum port port;
- uint32_t val;
-
- if (conn_state->crtc != crtc)
- continue;
-
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
+ enum port port = encoder->port;
+ u32 val;
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ mutex_lock(&dev_priv->dpll_lock);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
- }
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
-
- mutex_unlock(&dev_priv->dpll_lock);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
}
+
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
- enum port port;
+ mutex_lock(&dev_priv->dpll_lock);
- if (old_conn_state->crtc != crtc)
- continue;
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
- I915_WRITE(DPCLKA_CFGCR0_ICL,
- I915_READ(DPCLKA_CFGCR0_ICL) |
- icl_dpclka_cfgcr0_clk_off(dev_priv, port));
- mutex_unlock(&dev_priv->dpll_lock);
- }
+ mutex_unlock(&dev_priv->dpll_lock);
}
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
- enum port port = encoder->port;
- bool clk_enabled;
+ enum port port;
+ u32 port_mask;
+ bool ddi_clk_needed;
/*
* In case of DP MST, we sanitize the primary encoder only, not the
@@ -2864,9 +2848,6 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (encoder->type == INTEL_OUTPUT_DP_MST)
return;
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
-
if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
u8 pipe_mask;
bool is_mst;
@@ -2880,20 +2861,52 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
return;
}
- if (clk_enabled == !!encoder->base.crtc)
- return;
+ port_mask = BIT(encoder->port);
+ ddi_clk_needed = encoder->base.crtc;
- /*
- * Punt on the case now where clock is disabled, but the encoder is
- * enabled, something else is really broken then.
- */
- if (WARN_ON(!clk_enabled))
- return;
+ if (encoder->type == INTEL_OUTPUT_DSI) {
+ struct intel_encoder *other_encoder;
- DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
- port_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ port_mask = intel_dsi_encoder_ports(encoder);
+ /*
+ * Sanity check that we haven't incorrectly registered another
+ * encoder using any of the ports of this DSI encoder.
+ */
+ for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ if (other_encoder == encoder)
+ continue;
+
+ if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ return;
+ }
+ /*
+ * DSI ports should have their DDI clock ungated when disabled
+ * and gated when enabled.
+ */
+ ddi_clk_needed = !encoder->base.crtc;
+ }
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_port_masked(port, port_mask) {
+ bool ddi_clk_ungated = !(val &
+ icl_dpclka_cfgcr0_clk_off(dev_priv,
+ port));
+
+ if (ddi_clk_needed == ddi_clk_ungated)
+ continue;
+
+ /*
+ * Punt on the case now where clock is gated, but it would
+ * be needed by the port. Something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ port_name(port));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ }
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -3096,6 +3109,53 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
I915_WRITE(MG_DP_MODE(port, 1), ln1);
}
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+ DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val |= DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ 1))
+ DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+}
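
intel_wait_for_register() above polls DP_TP_STATUS until the FEC live bit latches, with a 1 ms budget. The basic shape, as a generic userspace sketch (read_status is a stand-in; the driver helper additionally handles fast-waits and forcewake):

#include <stdbool.h>
#include <stdint.h>

static bool wait_for_bits(uint32_t (*read_reg)(void), uint32_t mask,
			  uint32_t value, unsigned int timeout_ms)
{
	unsigned int elapsed;

	for (elapsed = 0; elapsed <= timeout_ms; elapsed++) {
		if ((read_reg() & mask) == value)
			return true;
		/* the real code sleeps ~1ms between polls */
	}
	return false;
}

static uint32_t fake_status;
static uint32_t read_status(void) { return fake_status; }

int main(void)
{
	fake_status = 1u << 28; /* DP_TP_STATUS_FEC_ENABLE_LIVE */
	return wait_for_bits(read_status, 1u << 28, 1u << 28, 1) ? 0 : 1;
}
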
+
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3134,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+ intel_ddi_enable_fec(encoder, crtc_state);
+
icl_enable_phy_clock_gating(dig_port);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
+
+ intel_dsc_enable(encoder, crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3208,6 +3275,9 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
WARN_ON(crtc_state->has_pch_encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_map_plls_to_ports(encoder, crtc_state);
+
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -3228,7 +3298,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
}
}
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -3247,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
+ /* Disable FEC in DP Sink */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
@@ -3270,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3293,7 +3367,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
@@ -3306,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/*
* When called from DP MST code:
* - old_conn_state will be NULL
@@ -3325,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
else
intel_ddi_post_disable_dp(encoder,
old_crtc_state, old_conn_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_unmap_plls_to_ports(encoder);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3344,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3491,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
+ /* Disable the decompression in DP Sink */
+ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index ceecb5bd5226..1e56319334f3 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
}
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -782,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
- } else if (info->num_pipes > 0 &&
+ } else if (HAS_DISPLAY(dev_priv) &&
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -807,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 88f97210dc49..1caf24e2cf0b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,35 +89,38 @@ enum intel_ppgtt {
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
func(has_reset_engine); \
- func(has_fbc); \
func(has_fpga_dbg); \
- func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
- func(has_hotplug); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
- func(has_overlay); \
func(has_pooled_eu); \
- func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
func(unfenced_needs_alignment); \
+ func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+ /* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(hws_needs_physical); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_gmch_display); \
+ func(has_hotplug); \
+ func(has_ipc); \
+ func(has_overlay); \
+ func(has_psr); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
+ func(supports_tv);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
@@ -172,6 +175,13 @@ struct intel_device_info {
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
+
+ struct {
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ } display;
+
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 812ec5ae5c7b..07c861884c70 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2341,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int height;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv))
+ fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
*x = 0;
*y = 0;
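
As a worked example of what this catches: a 32768-line FB with a 131072-byte pitch gives 32768 * 131072 = 2^32, which truncates to 0 in a u32, so any plausible offset would appear to fit. Doing the add_overflows_t(u32, ...) comparison on the mul_u32_u32() product trips as intended:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t height = 32768, pitch = 131072, offset = 4096;
	uint64_t size = (uint64_t)height * pitch; /* mul_u32_u32() */

	printf("u32 sum: %u\n", (uint32_t)(size + offset));	/* 4096 */
	printf("overflows: %d\n", (uint32_t)(size + offset) < size); /* 1 */
	return 0;
}
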
@@ -2767,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_pre_disable_primary_noatomic(&crtc->base);
trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ plane->disable_plane(plane, crtc_state);
}
static void
@@ -3315,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -3330,47 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
if (INTEL_GEN(dev_priv) < 4) {
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- } else {
+ else
I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_plane(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3456,6 +3475,21 @@ static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
}
}
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+ * The stride is either expressed in 64 byte chunks for
+ * linear buffers or in number of tiles for tiled buffers.
+ */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
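
A worked example of the units above, with illustrative framebuffer parameters: a linear FB with a 4096-byte pitch programs PLANE_STRIDE as 4096 / 64 = 64 chunks, while an X-tiled FB (512-byte tile rows) programs 4096 / 512 = 8 tiles:

#include <stdio.h>

int main(void)
{
	unsigned int pitch = 4096;

	printf("linear: %u\n", pitch / 64);   /* 64 x 64-byte chunks */
	printf("x-tiled: %u\n", pitch / 512); /* 8 tiles per row */
	return 0;
}
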
+
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
@@ -3466,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
if (color_plane >= fb->format->num_planes)
return 0;
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- if (drm_rotation_90_or_270(rotation))
- stride /= intel_tile_height(fb, color_plane);
- else
- stride /= intel_fb_stride_alignment(fb, color_plane);
-
- return stride;
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
static u32 skl_plane_ctl_format(uint32_t pixel_format)
@@ -5403,23 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(crtc);
}
-static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int update_mask = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
unsigned fb_bits = 0;
+ int i;
intel_crtc_dpms_overlay_disable(crtc);
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane_mask & BIT(plane->id)) {
- plane->disable_plane(plane, crtc);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+ plane->disable_plane(plane, new_crtc_state);
+
+ if (old_plane_state->base.visible)
fb_bits |= plane->frontbuffer_bit;
- }
}
- intel_frontbuffer_flip(to_i915(dev), fb_bits);
+ intel_frontbuffer_flip(dev_priv, fb_bits);
}
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5692,9 +5726,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (pipe_config->shared_dpll)
intel_enable_shared_dpll(pipe_config);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(crtc, pipe_config, old_state);
-
intel_encoders_pre_enable(crtc, pipe_config, old_state);
if (intel_crtc_has_dp_encoder(pipe_config))
@@ -5889,6 +5920,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
@@ -5896,9 +5929,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
-
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
@@ -6724,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
}
void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n)
@@ -8939,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
- stride_mult = intel_fb_stride_alignment(fb, 0);
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -9303,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ IS_ICELAKE(dev_priv)) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
@@ -9444,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long enabled_panel_transcoders = 0;
+ enum transcoder panel_transcoder;
u32 tmp;
+ if (IS_ICELAKE(dev_priv))
+ panel_transcoder_mask |=
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
- * transcoder handled below.
+ * and DSI transcoders handled below.
*/
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -9456,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
+ for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ enum pipe trans_pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+ continue;
+
+ /*
+ * Log all enabled ones, only use the first one.
+ *
+ * FIXME: This won't work for two separate DSI displays.
+ */
+ enabled_panel_transcoders |= BIT(panel_transcoder);
+ if (enabled_panel_transcoders != BIT(panel_transcoder))
+ continue;
+
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
+ WARN(1, "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
+ trans_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
+ trans_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
+ trans_pipe = PIPE_C;
break;
}
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (trans_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = panel_transcoder;
}
+ /*
+ * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ */
+ WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
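
The check encodes the allowed sets of enabled panel transcoders: empty, {eDP}, {DSI0}, {DSI1} and {DSI0, DSI1}; in other words eDP may never be combined with a DSI transcoder. A quick truth check (bit positions illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int EDP = 1u << 0, DSI0 = 1u << 1, DSI1 = 1u << 2;
	unsigned int sets[] = { 0, EDP, DSI0, DSI1, DSI0 | DSI1, EDP | DSI0 };
	unsigned int i;

	for (i = 0; i < sizeof(sets) / sizeof(sets[0]); i++) {
		unsigned int s = sets[i];

		/* the WARN fires when eDP is present but not alone */
		printf("set 0x%x -> %s\n", s,
		       (s & EDP) && s != EDP ? "WARN" : "ok");
	}
	return 0;
}
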
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9611,7 +9670,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ IS_ICELAKE(dev_priv)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -9667,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
base = obj->phys_handle->busaddr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -9894,9 +9954,9 @@ static void i845_update_cursor(struct intel_plane *plane,
}
static void i845_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i845_update_cursor(plane, NULL, NULL);
+ i845_update_cursor(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10087,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always start the full update
- * with a CURCNTR write.
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
*
* On other platforms CURPOS always requires the
* CURBASE write to arm the update. Additionally
@@ -10098,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* cursor that doesn't appear to move, or even change
* shape. Thus we always write CURBASE.
*
- * CURCNTR and CUR_FBC_CTL are always
- * armed by the CURBASE write only.
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
*/
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(pipe), cntl);
if (HAS_CUR_FBC(dev_priv))
I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
I915_WRITE_FW(CURPOS(pipe), pos);
I915_WRITE_FW(CURBASE(pipe), base);
@@ -10122,9 +10187,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
}
static void i9xx_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i9xx_update_cursor(plane, NULL, NULL);
+ i9xx_update_cursor(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10835,8 +10900,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
plane_state->linked_plane = NULL;
- if (plane_state->slave && !plane_state->base.visible)
+ if (plane_state->slave && !plane_state->base.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ }
plane_state->slave = false;
}
@@ -10877,6 +10944,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->slave = true;
linked_state->linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
+ crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
}
@@ -11887,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11897,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -11939,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][plane];
- sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+ hw_ddb_entry = &hw_ddb_y[plane];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11989,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12668,7 +12740,6 @@ static void intel_update_crtc(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
@@ -12691,8 +12762,10 @@ static void intel_update_crtc(struct drm_crtc *crtc,
intel_begin_crtc_commit(crtc, old_crtc_state);
- intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
- old_intel_cstate, pipe_config);
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ else
+ i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
intel_finish_crtc_commit(crtc, old_crtc_state);
}
@@ -12885,7 +12958,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
if (old_crtc_state->active) {
- intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
+ intel_crtc_disable_planes(intel_state, intel_crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
@@ -13240,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
int err;
@@ -13735,7 +13808,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ intel_plane->disable_plane(intel_plane, crtc_state);
}
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -14186,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return;
/*
@@ -14451,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- struct drm_format_name_buf format_name;
u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
@@ -14482,39 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- /* Passed in modifier sanity checking. */
- switch (mode_cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- default:
- DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Yf_TILED:
- if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
- DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Y_TILED:
- if (INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
- mode_cmd->modifier[0]);
- goto err;
- }
- break;
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
mode_cmd->modifier[0]);
goto err;
}
@@ -14549,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- /* Reject formats not supported by any plane early. */
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- if (INTEL_GEN(dev_priv) > 3) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- if (INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_VYUY:
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_NV12:
- if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name));
- goto err;
- }
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
-
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
goto err;
@@ -16066,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
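
The transcoder bookkeeping above boils down to one invariant: if the eDP transcoder is enabled, it must be the only enabled panel transcoder, while the two DSI transcoders may be enabled in any combination. A minimal kernel-style sketch of the check the WARN_ON() encodes (bit names hypothetical, for illustration only):

	enum { EDP_BIT = 1 << 0, DSI0_BIT = 1 << 1, DSI1_BIT = 1 << 2 };

	static bool panel_combo_valid(unsigned int enabled)
	{
		/* Valid combos: none, eDP alone, DSI0, DSI1, DSI0 + DSI1 */
		if (enabled & EDP_BIT)
			return enabled == EDP_BIT;
		return (enabled & ~(DSI0_BIT | DSI1_BIT)) == 0;
	}
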
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 5f2955b944da..4262452963b3 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -242,6 +242,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -398,6 +399,14 @@ struct intel_link_m_n {
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -423,10 +432,18 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n);
-
bool is_ccs_modifier(u64 modifier);
#endif
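
The new for_each_oldnew_intel_crtc_in_state() mirrors the drm_atomic for_each_oldnew_crtc_in_state() convention: a single walk over the atomic state that yields each crtc together with its old and new state. A sketch of a typical caller, with the variable names assumed:

	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/* compare old vs. new state for this crtc */
	}
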
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 7699f9b7b2d2..fdd2cbc56fa3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -47,6 +47,8 @@
/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
+#define DP_DSC_MIN_SUPPORTED_BPC 8
+#define DP_DSC_MAX_SUPPORTED_BPC 10
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -543,7 +545,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
dsc_slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
- } else {
+ } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
intel_dp_dsc_get_output_bpp(max_link_clock,
max_lanes,
@@ -1708,6 +1710,41 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 11 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ return false;
+
+ return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -1842,14 +1879,122 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
return false;
}
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+ for (i = 0; i < num_bpc; i++) {
+ if (dsc_max_bpc >= dsc_bpc[i])
+ return dsc_bpc[i] * 3;
+ }
+
+ return 0;
+}
+
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ u8 dsc_max_bpc;
+ int pipe_bpp;
+
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return false;
+
+ dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+ conn_state->max_requested_bpc);
+
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+ if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+ DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ return false;
+ }
+
+ /*
+ * For now enable DSC for max bpp, max link rate, max lane count.
+ * Optimize this later for the minimum possible link rate/lane count
+ * with DSC enabled for the requested mode.
+ */
+ pipe_config->pipe_bpp = pipe_bpp;
+ pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->lane_count = limits->max_lane_count;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ pipe_config->dsc_params.compressed_bpp =
+ min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else {
+ u16 dsc_max_output_bpp;
+ u8 dsc_dp_slice_count;
+
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ dsc_dp_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ return false;
+ }
+ pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ }
+ /*
+	 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
+	 * rate is greater than the maximum CD clock and the slice count is
+	 * greater than one, we need to split the stream across 2 VDSC
+	 * instances.
+ */
+ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+ if (pipe_config->dsc_params.slice_count > 1) {
+ pipe_config->dsc_params.dsc_split = true;
+ } else {
+ DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ return false;
+ }
+ }
+ if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+ DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+ return false;
+ }
+ pipe_config->dsc_params.compression_enable = true;
+ DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp,
+ pipe_config->dsc_params.slice_count);
+
+ return true;
+}
+
static bool
intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
+ bool ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
@@ -1888,7 +2033,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp))
/*
* Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
* section A.1: "It is recommended that the minimum number of
@@ -1898,26 +2043,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* Note that we use the max clock and lane count for eDP 1.3 and
* earlier, and fast vs. wide is irrelevant.
*/
- if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
- &limits))
- return false;
- } else {
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+ &limits);
+ else
/* Optimize for slow and wide. */
- if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
- &limits))
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+ &limits);
+
+ /* enable compression if the mode doesn't fit available BW */
+ if (!ret) {
+ if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits))
return false;
}
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ if (pipe_config->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc_params.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ } else {
+ DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ }
return true;
}
@@ -1983,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
- if (!intel_dp_compute_link_config(encoder, pipe_config))
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
+ if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2001,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
- intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (!pipe_config->dsc_params.compression_enable)
+ intel_link_compute_m_n(pipe_config->pipe_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
+ else
+ intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2702,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ int ret;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+ enable ? DP_DECOMPRESSION_EN : 0);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
+}
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -3837,15 +4026,14 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
(int)sizeof(intel_dp->dsc_dpcd),
intel_dp->dsc_dpcd);
+
/* FEC is supported only on DP 1.4 */
- if (!intel_dp_is_edp(intel_dp)) {
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
- &intel_dp->fec_capable) < 0)
- DRM_ERROR("Failed to read FEC DPCD register\n");
+ if (!intel_dp_is_edp(intel_dp) &&
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+ &intel_dp->fec_capable) < 0)
+ DRM_ERROR("Failed to read FEC DPCD register\n");
- DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
- intel_dp->fec_capable);
- }
+ DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
}
}
@@ -3936,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
- u8 sink_count;
-
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -3947,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
- return false;
-
/*
- * Sink count can change between short pulse hpd hence
- * a member variable in intel_dp will track any changes
- * between short pulse interrupts.
+	 * Some eDP panels do not set a valid value for the sink count, which
+	 * is why we don't bother reading it here or in intel_edp_init_dpcd().
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+ if (!intel_dp_is_edp(intel_dp)) {
+ u8 count;
+ ssize_t r;
- /*
- * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
- * a dongle is present but no display. Unless we require to know
- * if a dongle is present or not, we don't need to update
- * downstream port information. So, an early return here saves
- * time from performing other operations which are not required.
- */
- if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
- return false;
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+ if (r < 1)
+ return false;
+
+ /*
+ * Sink count can change between short pulse hpd hence
+ * a member variable in intel_dp will track any changes
+ * between short pulse interrupts.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+ * a dongle is present but no display. Unless we require to know
+ * if a dongle is present or not, we don't need to update
+ * downstream port information. So, an early return here saves
+ * time from performing other operations which are not required.
+ */
+ if (!intel_dp->sink_count)
+ return false;
+ }
if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
@@ -4375,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (!intel_dp->link_trained)
return false;
+ /*
+	 * While PSR source HW is enabled, it controls the main-link, turning
+	 * frame transmission on and off by itself, so attempting a retrain
+	 * will fail: the link may not be on, or training patterns could get
+	 * mixed with frame data, either of which makes the retrain fail.
+	 * Also, when exiting PSR, the HW retrains the link anyway, fixing
+	 * any link status error.
+ */
+ if (intel_psr_enabled(intel_dp))
+ return false;
+
if (!intel_dp_get_link_status(intel_dp, link_status))
return false;
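
Two unit conventions in the DSC hunks above are easy to miss: intel_dp_dsc_compute_bpp() returns the pipe bpp as bpc * 3 (three RGB components), and the sink's maximum compressed output bpp comes out of DPCD in 1/16 bpp units, hence the >> 4 before it is clamped against pipe_bpp. A worked example under assumed sink values:

	u8 dsc_bpc[3] = { 10, 8, 0 };		/* sink bpc list, highest first */
	int pipe_bpp = dsc_bpc[0] * 3;		/* 30 bpp: 3 RGB components */
	u16 compressed_bpp = min_t(u16,
				   192 >> 4,	/* DPCD field, 1/16 bpp units */
				   pipe_bpp);	/* -> 12 bpp on the wire */
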
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 901e15063b24..d513ca875c67 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2523,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
if (intel_port_is_tc(dev_priv, encoder->port))
ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
else
ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a7d9ac912125..f94a04b4ad87 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -706,6 +706,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -926,6 +928,9 @@ struct intel_crtc_state {
u8 active_planes;
u8 nv12_planes;
+ /* bitmask of planes that will be updated during the commit */
+ u8 update_planes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -937,6 +942,18 @@ struct intel_crtc_state {
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
+
+ /* Display Stream compression state */
+ struct {
+ bool compression_enable;
+ bool dsc_split;
+ u16 compressed_bpp;
+ u8 slice_count;
+ } dsc_params;
+ struct drm_dsc_config dp_dsc_cfg;
+
+ /* Forward Error correction State */
+ bool fec_enable;
};
struct intel_crtc {
@@ -1013,7 +1030,7 @@ struct intel_plane {
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
- struct intel_crtc *crtc);
+ const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1517,13 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@@ -1788,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1843,6 +1859,12 @@ uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
@@ -2047,6 +2069,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);
@@ -2181,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2195,6 +2221,10 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry entries[],
int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
@@ -2287,10 +2317,10 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
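
The update_planes bitmask declared above is deliberately separate from active_planes: a slave plane that loses visibility drops out of active_planes but still needs one more register write to actually shut it off, so it stays in update_planes. A sketch of the commit-time loop this enables, with the plane hooks elided:

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		if (!(new_crtc_state->update_planes & BIT(plane->id)))
			continue;

		if (new_crtc_state->active_planes & BIT(plane->id))
			/* program the plane: update_plane() path */;
		else
			/* write the disable: disable_plane() path */;
	}
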
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ee93137f4433..d968f1f13e09 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -146,6 +146,11 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+ return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
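
intel_dsi_encoder_ports() lets callers obtain the DSI port bitmask without dereferencing struct intel_dsi themselves; paired with the existing for_each_dsi_port() iterator, a caller would look roughly like:

	enum port port;

	for_each_dsi_port(port, intel_dsi_encoder_ports(encoder))
		/* program the per-port DSI registers here */;
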
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a72de81f4832..a1a8b3790e61 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
#define CHV_GPIO_CFGLOCK (1 << 31)
+/* ICL DSI Display GPIO Pins */
+#define ICL_GPIO_DDSP_HPD_A 0
+#define ICL_GPIO_L_VDDEN_1 1
+#define ICL_GPIO_L_BKLTEN_1 2
+#define ICL_GPIO_DDPA_CTRLCLK_1 3
+#define ICL_GPIO_DDPA_CTRLDATA_1 4
+#define ICL_GPIO_DDSP_HPD_B 5
+#define ICL_GPIO_L_VDDEN_2 6
+#define ICL_GPIO_L_BKLTEN_2 7
+#define ICL_GPIO_DDPA_CTRLCLK_2 8
+#define ICL_GPIO_DDPA_CTRLDATA_2 9
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -324,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -347,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_VALLEYVIEW(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 759c0fd58f8c..af2873403009 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -493,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -707,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -723,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->ctx_wa_list);
+ intel_wa_list_free(&engine->wa_list);
+ intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
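
With intel_engine_create_scratch() removed, each engine's private scratch VMA gives way to a single device-level scratch page. The i915_scratch_offset() helper used by the intel_lrc.c hunks below presumably reduces to something like the following (body inferred from the call sites, not quoted from the patch):

	static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
	{
		return i915_ggtt_offset(i915->gt.scratch);
	}
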
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 14cbaf4a0e93..f23570c44323 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1309,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
if (need_fbc_vtd_wa(dev_priv))
- mkwrite_device_info(dev_priv)->has_fbc = false;
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 2480c7d6edee..fb5bb5b32a60 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
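
The open-coded num_pipes == 0 tests scattered through the driver collapse into a single predicate; judging from the converted call sites, HAS_DISPLAY() is presumably defined along the lines of:

	#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
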
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e2c6a2b3e8f2..07e803a604bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_PPS:
+ return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_PPS:
+ return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,6 +152,18 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
+static int hsw_dip_data_size(unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_PPS:
+ return VIDEO_DIP_PPS_DATA_SIZE;
+ default:
+ return VIDEO_DIP_DATA_SIZE;
+ }
+}
+
static void g4x_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
@@ -382,11 +398,12 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- int data_size = type == DP_SDP_VSC ?
- VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+ int data_size;
int i;
u32 val = I915_READ(ctl_reg);
+ data_size = hsw_dip_data_size(type);
+
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab93fdd..802d0394ccc4 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 08fd9b12e4d7..d7fa301b5ec7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -767,6 +767,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
+ const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -776,8 +778,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = execlists->csb_write_reset;
- WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
}
static void nop_submission_tasklet(unsigned long data)
@@ -818,8 +820,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_request_completed(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ continue;
+
+ dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
@@ -839,6 +844,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
kmem_cache_free(engine->i915->priorities, p);
}
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
@@ -1279,9 +1288,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1295,7 +1305,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1332,7 +1342,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1401,18 +1411,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
- /* WaClearSlmSpaceAtContextSwitch:kbl */
- /* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
- batch = gen8_emit_pipe_control(batch,
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
- + 2 * CACHELINE_BYTES);
- }
-
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
@@ -1629,6 +1627,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -1653,7 +1653,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1676,7 +1676,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
return 0;
}
@@ -1974,7 +1974,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2088,7 +2088,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret)
return ret;
@@ -2229,12 +2229,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2264,24 +2258,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
upper_32_bits(ce->lrc_desc);
}
- execlists->csb_read =
- i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- if (csb_force_mmio(i915)) {
- execlists->csb_status = (u32 __force *)
- (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- execlists->csb_write = (u32 __force *)execlists->csb_read;
- execlists->csb_write_reset =
- _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
- GEN8_CSB_ENTRIES - 1);
- } else {
- execlists->csb_status =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write =
- &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
- }
reset_csb_pointers(execlists);
return 0;
@@ -2311,10 +2293,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2326,11 +2304,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
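
reset_csb_pointers() parks both the cached head and the HW-visible write pointer at GEN8_CSB_ENTRIES - 1, one slot before entry 0, so the very first post-reset CSB event makes head != write and is consumed even before the first interrupt arrives. Numerically, assuming the usual six-entry CSB:

	unsigned int head = 6 - 1, write = 6 - 1;	/* parked, head == write */

	/* The HW writes its first post-reset event into entry 0 ... */
	write = 0;					/* head (5) != write (0) */
	/* ... so the inline head/write comparison fires immediately. */
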
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index d84b6d2d2fae..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -87,12 +87,12 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
}
-void intel_opregion_resume(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
{
}
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
- pci_power_t state)
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state)
{
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 897a791662c5..a26b4eddda25 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3951,68 +3951,68 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_allocation *ddb /* out */)
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- u32 val, val2 = 0;
- int fourcc, pixel_format;
+ u32 val, val2;
+ u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
- if (!(val & PLANE_CTL_ENABLE))
- return;
-
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ if (val & PLANE_CTL_ENABLE)
+ fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ } else {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val2);
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->uv_plane[pipe][plane_id], val);
- } else {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ if (fourcc == DRM_FORMAT_NV12)
+ swap(val, val2);
+
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- struct intel_crtc *crtc;
-
- memset(ddb, 0, sizeof(*ddb));
-
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id;
- enum pipe pipe = crtc->pipe;
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return;
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
- continue;
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id,
+ &ddb_y[plane_id],
+ &ddb_uv[plane_id]);
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id, ddb);
+ intel_display_power_put(dev_priv, power_domain);
+}
- intel_display_power_put(dev_priv, power_domain);
- }
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+ memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][plane_id].start = start;
- ddb->plane[pipe][plane_id].end = start + plane_blocks;
+ cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
- ddb->uv_plane[pipe][plane_id].start = start;
- ddb->uv_plane[pipe][plane_id].end =
+ cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
@@ -4617,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int plane_id)
+ struct skl_wm_params *wp, int color_plane)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
uint32_t interm_pbpl;
@@ -4630,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* only NV12 format has two planes */
- if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+ if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
		DRM_DEBUG_KMS("Non-NV12 formats have a single plane\n");
return -EINVAL;
}
@@ -4659,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- if (plane_id == 1 && wp->is_planar)
+ if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[plane_id];
+ wp->cpp = fb->format->cpp[color_plane];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
@@ -4724,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_allocation,
int level,
@@ -4733,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
@@ -4743,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
uint32_t min_disp_buf_needed;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- result->plane_en = false;
- return 0;
- }
+ if (latency == 0)
+ return level == 0 ? -EINVAL : 0;
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4844,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((level > 0 && res_lines > 31) ||
res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
- result->plane_en = false;
-
/*
* If there are no valid level 0 watermarks, then we can't
* support this display configuration.
@@ -4872,27 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm,
struct skl_wm_level *levels)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
int ret;
- if (WARN_ON(!intel_pstate->base.fb))
- return -EINVAL;
-
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
+ ret = skl_compute_plane_wm(cstate,
intel_pstate,
ddb_blocks,
level,
@@ -4905,9 +4892,6 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
result_prev = result;
}
- if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- wm->is_planar = true;
-
return 0;
}
@@ -4935,10 +4919,9 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
- struct skl_wm_params *wp,
- struct skl_wm_level *wm_l0,
- uint16_t ddb_allocation,
- struct skl_wm_level *trans_wm /* out */)
+ const struct skl_wm_params *wp,
+ struct skl_plane_wm *wm,
+ uint16_t ddb_allocation)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4946,16 +4929,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
const uint16_t trans_amount = 10; /* This is configurable amount */
uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
- if (!cstate->base.active)
- goto exit;
-
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
- goto exit;
+ return;
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
- goto exit;
+ return;
trans_min = 14;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4973,7 +4953,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
* Result Blocks is Result Blocks minus 1 and it should work for the
* current platforms.
*/
- wm0_sel_res_b = wm_l0->plane_res_b - 1;
+ wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
@@ -4992,107 +4972,129 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
res_blocks += 1;
if (res_blocks < ddb_allocation) {
- trans_wm->plane_res_b = res_blocks;
- trans_wm->plane_en = true;
- return;
+ wm->trans_wm.plane_res_b = res_blocks;
+ wm->trans_wm.plane_en = true;
}
-
-exit:
- trans_wm->plane_en = false;
}
-static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- int color_plane)
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id, int color_plane)
{
- struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
- struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
- enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
- uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
- ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
&wm_params, color_plane);
if (ret)
return ret;
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
- ddb_blocks, &wm_params, wm, wm->wm);
-
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->wm);
if (ret)
return ret;
- skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
- ddb_blocks, &wm->trans_wm);
+ skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
return 0;
}
-static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
-{
- enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
-
- return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
-}
-
-static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id)
{
- struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
struct skl_wm_params wm_params;
- enum pipe pipe = plane->pipe;
- uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
- ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
- if (ret)
- return ret;
+ wm->is_planar = true;
/* uv plane watermarks must also be validated for NV12/Planar */
- ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
- ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->uv_wm);
if (ret)
return ret;
- return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
- ddb_blocks, &wm_params, wm, wm->uv_wm);
+ return 0;
}
-static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
int ret;
- enum plane_id y_plane_id = pstate->linked_plane->id;
- enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
- ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
- cstate, pstate, 0);
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
if (ret)
return ret;
- return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
- cstate, pstate, 1);
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->slave)
+ return 0;
+
+ if (plane_state->linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+ WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+ WARN_ON(!fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ y_plane_id, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@@ -5108,18 +5110,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
- /* Watermarks calculated in master */
- if (intel_pstate->slave)
- continue;
-
- if (intel_pstate->linked_plane)
- ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
- else if (intel_pstate->base.fb &&
- intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+ if (INTEL_GEN(dev_priv) >= 11)
+ ret = icl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
else
- ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
-
+ ret = skl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
if (ret)
return ret;
}
@@ -5134,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE(reg, 0);
+ I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5151,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
- I915_WRITE(reg, val);
+ I915_WRITE_FW(reg, val);
}
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5172,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->uv_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- } else {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ return;
}
+
+ if (wm->is_planar)
+ swap(ddb_y, ddb_uv);
+
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5202,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &ddb->plane[pipe][PLANE_CURSOR]);
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
- if (l1->plane_en != l2->plane_en)
- return false;
+ return l1->plane_en == l2->plane_en &&
+ l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b;
+}
- /* If both planes aren't enabled, the rest shouldn't matter */
- if (!l1->plane_en)
- return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv);
- return (l1->plane_res_l == l2->plane_res_l &&
- l1->plane_res_b == l2->plane_res_b);
+ for (level = 0; level <= max_level; level++) {
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+ !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5244,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
- struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
@@ -5276,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_device *dev = state->dev;
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_plane *plane;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- struct drm_plane_state *plane_state;
- struct intel_plane *linked;
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
- &new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
- &new_ddb->uv_plane[pipe][plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
+ plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
- /* Make sure linked plane is updated too */
- linked = to_intel_plane_state(plane_state)->linked_plane;
- if (!linked)
- continue;
-
- plane_state = drm_atomic_get_plane_state(state, &linked->base);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
+ new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
@@ -5323,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
- ret = skl_allocate_pipe_ddb(cstate, ddb);
+ for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(cstate);
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
if (ret)
return ret;
}
@@ -5343,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
{
- const struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- const struct intel_plane *intel_plane;
- const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
- const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
int i;
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- enum plane_id plane_id = intel_plane->id;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_ddb->plane[pipe][plane_id];
- new = &new_ddb->plane[pipe][plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
- intel_plane->base.base.id,
- intel_plane->base.name,
+ plane->base.base.id, plane->base.name,
old->start, old->end,
new->start, new->end);
}
@@ -5469,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
return 0;
}
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ skl_plane_wm_equals(dev_priv,
+ &old_crtc_state->wm.skl.optimal.planes[plane_id],
+ &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -5508,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
- &results->ddb, &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ if (ret)
+ return ret;
+
+ ret = skl_wm_add_affected_planes(intel_state,
+ to_intel_crtc(crtc));
if (ret)
return ret;
@@ -5523,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
- skl_print_wm_changes(state);
+ skl_print_wm_changes(intel_state);
return 0;
}
@@ -5534,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id != PLANE_CURSOR)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
- ddb, plane_id);
- else
- skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
- ddb);
- }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5560,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
- struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
- enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
@@ -5571,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
- sizeof(hw_vals->ddb.uv_plane[pipe]));
- memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
- sizeof(hw_vals->ddb.plane[pipe]));
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5726,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
- } else {
- /*
- * Easy/common case; just sanitize DDB now if everything off
- * Keep dbuf slice info intact
- */
- memset(ddb->plane, 0, sizeof(ddb->plane));
- memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
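
The stricter skl_wm_level_equals() above is what makes skl_wm_add_affected_planes() workable: previously two disabled levels compared equal no matter what residual block/line values they carried, so a stale register value could never be detected. A minimal sketch of the behavioural difference (the values are illustrative, not from the patch):

    struct skl_wm_level a = { .plane_en = false, .plane_res_b = 7 };
    struct skl_wm_level b = { .plane_en = false, .plane_res_b = 0 };

    /* old helper: a == b (both disabled, remaining fields ignored) */
    /* new helper: a != b, so skl_wm_add_affected_planes() pulls the
     * plane state into the commit and the stale value gets rewritten */
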
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 54fa17a5596a..419e56342523 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -75,6 +75,10 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
if (i915_modparams.enable_psr == -1)
return false;
+ /* Cannot enable DSC and PSR2 simultaneously */
+ WARN_ON(crtc_state->dsc_params.compression_enable &&
+ crtc_state->has_psr2);
+
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -169,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
u32 transcoders = BIT(TRANSCODER_EDP);
enum transcoder cpu_transcoder;
ktime_t time_ns = ktime_get();
+ u32 mask = 0;
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
@@ -178,10 +183,22 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
int shift = edp_psr_shift(cpu_transcoder);
- /* FIXME: Exit PSR and link train manually when this happens. */
- if (psr_iir & EDP_PSR_ERROR(shift))
- DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_ERROR(shift)) {
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ dev_priv->psr.irq_aux_error = true;
+
+ /*
+ * If this interrupt is not masked it will keep
+ * firing so fast that it prevents the scheduled
+ * work from running.
+ * Also, after a PSR error we don't want to arm PSR
+ * again, so we don't care about unmasking the
+ * interrupt or clearing irq_aux_error.
+ */
+ mask |= EDP_PSR_ERROR(shift);
+ }
if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
@@ -203,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
}
+
+ if (mask) {
+ mask |= I915_READ(EDP_PSR_IMR);
+ I915_WRITE(EDP_PSR_IMR, mask);
+
+ schedule_work(&dev_priv->psr.work);
+ }
}
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -482,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ /*
+ * DSC and PSR2 cannot be enabled simultaneously. If a requested
+ * resolution requires DSC to be enabled, priority is given to DSC
+ * over PSR2.
+ */
+ if (crtc_state->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ return false;
+ }
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
@@ -527,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ if (dev_priv->psr.sink_not_reliable) {
+ DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
return;
}
@@ -687,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.prepared = true;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (psr_global_enabled(dev_priv->psr.debug))
intel_psr_enable_locked(dev_priv, crtc_state);
@@ -933,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return ret;
}
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+ struct i915_psr *psr = &dev_priv->psr;
+
+ intel_psr_disable_locked(psr->dp);
+ psr->sink_not_reliable = true;
+ /* let's make sure that the sink is awake */
+ drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -943,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
if (!dev_priv->psr.enabled)
goto unlock;
+ if (READ_ONCE(dev_priv->psr.irq_aux_error))
+ intel_psr_handle_irq(dev_priv);
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it stays disabled until the next full enable/disable cycle.
@@ -981,9 +1027,6 @@ unlock:
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -996,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
@@ -1024,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -1039,10 +1076,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
@@ -1056,7 +1090,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
* but it makes more sense to write to the current active
* pipe.
*/
- I915_WRITE(CURSURFLIVE(pipe), 0);
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1073,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ u32 val;
+
if (!HAS_PSR(dev_priv))
return;
@@ -1086,6 +1122,22 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling PSR in this situation causes the screen to freeze the
+ * first time the PSR HW tries to activate, so let's keep PSR
+ * disabled to avoid any rendering problems.
+ */
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+ if (val) {
+ DRM_DEBUG_KMS("PSR interruption error set\n");
+ dev_priv->psr.sink_not_reliable = true;
+ return;
+ }
+
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
@@ -1123,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
}
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1140,10 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
val & ~errors);
- if (val & errors)
+ if (val & errors) {
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ }
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
mutex_unlock(&psr->lock);
}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ bool ret;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return false;
+
+ mutex_lock(&dev_priv->psr.lock);
+ ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+ mutex_unlock(&dev_priv->psr.lock);
+
+ return ret;
+}
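
The irq handler change above follows a standard defer-to-worker shape: the hard IRQ only records the failure and masks the storming source, while the expensive recovery runs later from intel_psr_work(). A condensed restatement of that shape (not the driver's actual function):

    static void psr_error_irq(struct drm_i915_private *dev_priv, int shift)
    {
    	dev_priv->psr.irq_aux_error = true;	/* picked up by the worker */

    	/* mask the error bit so it stops re-firing immediately */
    	I915_WRITE(EDP_PSR_IMR,
    		   I915_READ(EDP_PSR_IMR) | EDP_PSR_ERROR(shift));

    	schedule_work(&dev_priv->psr.work);	/* recovery in process context */
    }
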
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 87eebc13c0d8..c5eb26a7ee79 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -150,8 +150,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -159,8 +158,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -212,8 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -246,8 +243,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -316,8 +312,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -529,6 +524,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
+ if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -546,10 +548,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
intel_ring_update_space(ring);
+
+ /* First wake the ring up to an empty/idle ring */
I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->tail);
+ I915_WRITE_TAIL(engine, ring->head);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -574,6 +577,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ /* Now awake, let it get started */
+ if (ring->tail != ring->head) {
+ I915_WRITE_TAIL(engine, ring->tail);
+ (void)I915_READ_TAIL(engine);
+ }
+
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_wakeup(engine);
out:
@@ -642,7 +651,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret != 0)
return ret;
@@ -660,8 +669,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
-
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -743,9 +750,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags))
+ continue;
+
+ dma_fence_set_error(&request->fence, -EIO);
}
+
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -973,7 +989,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -981,7 +997,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1438,7 +1456,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1463,21 +1480,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1551,7 +1559,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1660,7 +1668,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
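
The init_ring_common() hunks split the restart into two phases: the ring is first woken up empty (TAIL == HEAD) and only kicked with the real tail once it is running. Condensed from the hunks above (error handling and the RING_VALID wait elided):

    /* phase 1: wake the ring up to an empty/idle state */
    I915_WRITE_HEAD(engine, ring->head);
    I915_WRITE_TAIL(engine, ring->head);
    (void)I915_READ_TAIL(engine);

    I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
    /* ... wait for the ring to report valid ... */

    /* phase 2: now awake, submit any pending work */
    if (ring->tail != ring->head) {
    	I915_WRITE_TAIL(engine, ring->tail);
    	(void)I915_READ_TAIL(engine);
    }
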
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8a2270b209b0..72edaa7ff411 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -313,13 +314,6 @@ struct intel_engine_execlists {
struct rb_root_cached queue;
/**
- * @csb_read: control register for Context Switch buffer
- *
- * Note this register is always in mmio.
- */
- u32 __iomem *csb_read;
-
- /**
* @csb_write: control register for Context Switch buffer
*
* Note this register may be either mmio or HWSP shadow.
@@ -339,15 +333,6 @@ struct intel_engine_execlists {
u32 preempt_complete_status;
/**
- * @csb_write_reset: reset value for CSB write pointer
- *
- * As the CSB write pointer maybe either in HWSP or as a field
- * inside an mmio register, we want to reprogram it slightly
- * differently to avoid later confusion.
- */
- u32 csb_write_reset;
-
- /**
* @csb_head: context status buffer head
*/
u8 csb_head;
@@ -451,7 +436,9 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -908,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1c2de9b69a19..4350a5270423 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -2028,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
- * - eDP/DSI VDSC
* - KVMR (HW control)
*/
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
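
Power domain masks are plain u64 bitmasks built from BIT_ULL(), so adding the VDSC domain to ICL_PW_2_POWER_DOMAINS simply means power well 2 now covers it. Illustrative check (the helper name is made up):

    #define HAS_DOMAIN(mask, d)	(!!((mask) & BIT_ULL(d)))

    /* true after this patch: VDSC is powered whenever PW2 is up */
    HAS_DOMAIN(ICL_PW_2_POWER_DOMAINS, POWER_DOMAIN_TRANSCODER_EDP_VDSC);
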
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index abe193815ccc..d2e003d8f3db 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -373,14 +373,12 @@ skl_program_scaler(struct intel_plane *plane,
#define BOFF(x) (((x) & 0xffff) << 16)
static void
-icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- enum pipe pipe = crtc->pipe;
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
static const u16 input_csc_matrix[][9] = {
@@ -508,28 +506,12 @@ skl_program_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- plane_state->color_ctl);
-
- if (fb->format->is_yuv && icl_is_hdr_plane(plane))
- icl_program_input_csc_coeff(crtc_state, plane_state);
-
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
-
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
-
if (icl_is_hdr_plane(plane)) {
u32 cus_ctl = 0;
@@ -551,15 +533,38 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
}
- if (!slave && plane_state->scaler_id >= 0)
- skl_program_scaler(plane, crtc_state, plane_state);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ plane_state->color_ctl);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ skl_write_plane_wm(plane, crtc_state);
+ I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+ I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) |
+ plane_state->color_plane[1].x);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
+ if (!slave && plane_state->scaler_id >= 0)
+ skl_program_scaler(plane, crtc_state, plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -589,7 +594,8 @@ icl_update_slave(struct intel_plane *plane,
}
static void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -598,6 +604,8 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ skl_write_plane_wm(plane, crtc_state);
+
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
@@ -819,35 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- vlv_update_clrc(plane_state);
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+ I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
}
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
I915_WRITE_FW(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ vlv_update_clrc(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -980,27 +994,32 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (IS_IVYBRIDGE(dev_priv))
+ I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
if (key->flags) {
I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
} else {
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
}
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
@@ -1009,7 +1028,8 @@ ivb_update_plane(struct intel_plane *plane,
}
static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1018,7 +1038,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
- /* Can't leave the scaler enabled... */
+ /* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
I915_WRITE_FW(SPRSURF(pipe), 0);
@@ -1148,20 +1168,25 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
if (key->flags) {
I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
@@ -1170,7 +1195,8 @@ g4x_update_plane(struct intel_plane *plane,
}
static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
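
All four update paths (skl, vlv, ivb, g4x) are reordered to the same shape, per the repeated comment about the self-arming control register. In outline (names generic, not the driver's):

    write_config_regs();	/* stride, pos, size, keying, offsets ... */
    write_control_reg();	/* self-arms if the plane was disabled */
    write_surface_reg();	/* latches the whole update atomically */
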
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 000000000000..c56ba0e04044
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+ ROW_INDEX_6BPP = 0,
+ ROW_INDEX_8BPP,
+ ROW_INDEX_10BPP,
+ ROW_INDEX_12BPP,
+ ROW_INDEX_15BPP,
+ MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+ COLUMN_INDEX_8BPC = 0,
+ COLUMN_INDEX_10BPC,
+ COLUMN_INDEX_12BPC,
+ COLUMN_INDEX_14BPC,
+ COLUMN_INDEX_16BPC,
+ MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+/* From DSC_v1.11 spec, rc_parameter_set syntax element typically constant */
+static u16 rc_buf_thresh[] = {
+ 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+ 7744, 7872, 8000, 8064
+};
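
Fourteen thresholds delimit fifteen buffer-fullness ranges, one per rc_range_params entry, so the array size is tied to DSC_NUM_BUF_RANGES. A compile-time check one could add (not in the patch):

    BUILD_BUG_ON(ARRAY_SIZE(rc_buf_thresh) != DSC_NUM_BUF_RANGES - 1);
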
+
+struct rc_parameters {
+ u16 initial_xmit_delay;
+ u8 first_line_bpg_offset;
+ u16 initial_offset;
+ u8 flatness_min_qp;
+ u8 flatness_max_qp;
+ u8 rc_quant_incr_limit0;
+ u8 rc_quant_incr_limit1;
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+ /* 6BPP/8BPC */
+ { 768, 15, 6144, 3, 13, 11, 11, {
+ { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+ { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+ { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+ { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+ }
+ },
+ /* 6BPP/10BPC */
+ { 768, 15, 6144, 7, 17, 15, 15, {
+ { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+ { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+ { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+ { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+ { 17, 18, -12 }
+ }
+ },
+ /* 6BPP/12BPC */
+ { 768, 15, 6144, 11, 21, 19, 19, {
+ { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+ { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+ { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+ { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+ { 21, 22, -12 }
+ }
+ },
+ /* 6BPP/14BPC */
+ { 768, 15, 6144, 15, 25, 23, 27, {
+ { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+ { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+ { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+ { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+ { 25, 26, -12 }
+ }
+ },
+ /* 6BPP/16BPC */
+ { 768, 15, 6144, 19, 29, 27, 27, {
+ { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+ { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+ { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+ { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+ { 29, 30, -12 }
+ }
+ },
+},
+{
+ /* 8BPP/8BPC */
+ { 512, 12, 6144, 3, 12, 11, 11, {
+ { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+ { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 8BPP/10BPC */
+ { 512, 12, 6144, 7, 16, 15, 15, {
+ { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 8BPP/12BPC */
+ { 512, 12, 6144, 11, 20, 19, 19, {
+ { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 8BPP/14BPC */
+ { 512, 12, 6144, 15, 24, 23, 23, {
+ { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+ { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+ { 24, 25, -12 }
+ }
+ },
+ /* 8BPP/16BPC */
+ { 512, 12, 6144, 19, 28, 27, 27, {
+ { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+ { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+ { 28, 29, -12 }
+ }
+ },
+},
+{
+ /* 10BPP/8BPC */
+ { 410, 15, 5632, 3, 12, 11, 11, {
+ { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+ { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+ }
+ },
+ /* 10BPP/10BPC */
+ { 410, 15, 5632, 7, 16, 15, 15, {
+ { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+ { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+ }
+ },
+ /* 10BPP/12BPC */
+ { 410, 15, 5632, 11, 20, 19, 19, {
+ { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+ { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+ { 19, 20, -12 }
+ }
+ },
+ /* 10BPP/14BPC */
+ { 410, 15, 5632, 15, 24, 23, 23, {
+ { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+ { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+ { 23, 24, -12 }
+ }
+ },
+ /* 10BPP/16BPC */
+ { 410, 15, 5632, 19, 28, 27, 27, {
+ { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+ { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+ { 27, 28, -12 }
+ }
+ },
+},
+{
+ /* 12BPP/8BPC */
+ { 341, 15, 2048, 3, 12, 11, 11, {
+ { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+ { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 12BPP/10BPC */
+ { 341, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+ { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 12BPP/12BPC */
+ { 341, 15, 2048, 11, 20, 19, 19, {
+ { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+ { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 12BPP/14BPC */
+ { 341, 15, 2048, 15, 24, 23, 23, {
+ { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+ { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+ { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+ { 22, 23, -12 }
+ }
+ },
+ /* 12BPP/16BPC */
+ { 341, 15, 2048, 19, 28, 27, 27, {
+ { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+ { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+ { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+ { 26, 27, -12 }
+ }
+ },
+},
+{
+ /* 15BPP/8BPC */
+ { 273, 15, 2048, 3, 12, 11, 11, {
+ { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+ { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+ { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+ { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+ }
+ },
+ /* 15BPP/10BPC */
+ { 273, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+ { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+ { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+ { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+ }
+ },
+ /* 15BPP/12BPC */
+ { 273, 15, 2048, 11, 20, 19, 19, {
+ { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+ { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+ { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+ { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+ { 16, 17, -12 }
+ }
+ },
+ /* 15BPP/14BPC */
+ { 273, 15, 2048, 15, 24, 23, 23, {
+ { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+ { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+ { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+ { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+ { 20, 21, -12 }
+ }
+ },
+ /* 15BPP/16BPC */
+ { 273, 15, 2048, 19, 28, 27, 27, {
+ { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+ { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+ { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+ { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+ { 24, 25, -12 }
+ }
+ }
+}
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+ switch (compressed_bpp) {
+ case 6:
+ return ROW_INDEX_6BPP;
+ case 8:
+ return ROW_INDEX_8BPP;
+ case 10:
+ return ROW_INDEX_10BPP;
+ case 12:
+ return ROW_INDEX_12BPP;
+ case 15:
+ return ROW_INDEX_15BPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+ switch (bits_per_component) {
+ case 8:
+ return COLUMN_INDEX_8BPC;
+ case 10:
+ return COLUMN_INDEX_10BPC;
+ case 12:
+ return COLUMN_INDEX_12BPC;
+ case 14:
+ return COLUMN_INDEX_14BPC;
+ case 16:
+ return COLUMN_INDEX_16BPC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
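+	/*
+	 * Illustrative sketch (assumed numbers, not mandated by the spec):
+	 * for slice_width = 960 at bits_per_pixel = 128 (8bpp in 1/16th
+	 * units), slice_chunk_size = DIV_ROUND_UP(960 * 128, 128) = 960
+	 * bytes per slice line.
+	 */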
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
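+	/*
+	 * Worked example, assuming the 8bpp RC parameters from the table
+	 * above and rc_model_size = 8192: initial_scale_value = 32
+	 * (4.0 in U3.3) and groups_per_line = 320 give
+	 * scale_decrement_interval = 320 / (32 - 8) = 13.
+	 */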
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+		 * NflBpgOffset is a 16-bit value with 11 fractional bits,
+		 * hence we multiply by 2^11 to preserve the
+		 * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
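+	/*
+	 * Worked example (assumed values): first_line_bpg_offset = 12 and
+	 * slice_height = 8 give
+	 * nfl_bpg_offset = DIV_ROUND_UP(12 << 11, 7) = 3511,
+	 * well within the 16-bit limit checked above.
+	 */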
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+	/* slice_bpg_offset is a 16-bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+		 * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125)).
+		 * final_scale - 9 equals 8 * (finalscale - 1.125) since
+		 * final_scale carries 3 fractional bits, and as
+		 * (NflBpgOffset + SliceBpgOffset) has an 11-bit fractional
+		 * part, final_offset is scaled by 2^11 to compensate.
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
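+	/*
+	 * Worked example, assuming the 8bpp RC parameters
+	 * (initial_xmit_delay = 512, first_line_bpg_offset = 12,
+	 * initial_offset = 6144), rc_model_size = 8192 and
+	 * groups_per_line = 320:
+	 * rbs_min = 8192 - 6144 + DIV_ROUND_UP(512 * 128, 16) + 320 * 12
+	 *         = 9984, so hrd_delay = DIV_ROUND_UP(9984 * 16, 128) = 1248
+	 * and initial_dec_delay = 1248 - 512 = 736.
+	 */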
+
+ return 0;
+}
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+ u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ u8 i = 0;
+ int row_index = 0;
+ int column_index = 0;
+ u8 line_buf_depth = 0;
+
+ vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc_params.slice_count);
+ /*
+	 * A slice height of 8 works for all currently available panels,
+	 * so start with that when pic_height is an integral multiple of 8.
+	 * Eventually add logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ /* Values filled from DSC Sink DPCD */
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ /* Gen 11 does not support YCbCr */
+ vdsc_cfg->enable422 = false;
+ /* Gen 11 does not support VBR */
+ vdsc_cfg->vbr_enable = false;
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ /* Gen 11 only supports integral values of bpp */
+ vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
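+	/*
+	 * For instance, compressed_bpp = 8 is programmed as
+	 * bits_per_pixel = 8 << 4 = 128, i.e. 8.0 in 1/16th bpp units.
+	 */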
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ /*
+		 * The hardware internally appends six 0s to the LSB of
+		 * each threshold value; only the upper 8 bits may be
+		 * programmed into RcBufThreshold.
+ */
+ vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+ }
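+	/*
+	 * For instance, a threshold of 896 from rc_buf_thresh[] is
+	 * programmed as 896 >> 6 = 14; the hardware restores the six
+	 * dropped LSBs as zeroes (14 << 6 = 896).
+	 */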
+
+ /*
+	 * For 6bpp, RC buffer thresholds 12 and 13 need different values,
+	 * as per the C model
+ */
+ if (compressed_bpp == 6) {
+ vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+ vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+ }
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ column_index =
+ get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+ if (row_index < 0 || column_index < 0)
+ return -EINVAL;
+
+ vdsc_cfg->first_line_bpg_offset =
+ rc_params[row_index][column_index].first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay =
+ rc_params[row_index][column_index].initial_xmit_delay;
+ vdsc_cfg->initial_offset =
+ rc_params[row_index][column_index].initial_offset;
+ vdsc_cfg->flatness_min_qp =
+ rc_params[row_index][column_index].flatness_min_qp;
+ vdsc_cfg->flatness_max_qp =
+ rc_params[row_index][column_index].flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 =
+ rc_params[row_index][column_index].rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 =
+ rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ vdsc_cfg->rc_range_params[i].range_min_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ vdsc_cfg->rc_range_params[i].range_max_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ /*
+		 * Range BPG Offset is a 6-bit two's complement value, so
+		 * mask it down to 6 bits.
+ */
+ vdsc_cfg->rc_range_params[i].range_bpg_offset =
+ rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+ /*
+ * BitsPerComponent value determines mux_word_size:
+	 * when BitsPerComponent is 12bpc, muxWordSize is 64 bits;
+	 * when BitsPerComponent is 8 or 10bpc, muxWordSize is 48 bits.
+ */
+ if (vdsc_cfg->bits_per_component == 8 ||
+ vdsc_cfg->bits_per_component == 10)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else if (vdsc_cfg->bits_per_component == 12)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ /* RC_MODEL_SIZE is a constant across all configurations */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
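+	/*
+	 * Worked example (assumed values): rc_model_size = 8192 and
+	 * initial_offset = 6144 give
+	 * initial_scale_value = (8192 << 3) / 2048 = 32, i.e. 4.0 in U3.3.
+	 */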
+
+ return intel_compute_rc_parameters(vdsc_cfg);
+}
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+	 * On ICL, VDSC/joining for the eDP transcoder uses a separate power
+	 * well, PW2, which requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC
+	 * power domain. For any other transcoder, VDSC/joining uses the
+	 * power well associated with the pipe/transcoder in use, so another
+	 * reference on that transcoder's power domain suffices.
+ */
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ else
+ return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 pps_val = 0;
+ u32 rc_buf_thresh_dword[4];
+ u32 rc_range_params_dword[8];
+ u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ int i = 0;
+
+ /* Populate PICTURE_PARAMETER_SET_0 registers */
+ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+ DSC_VER_MIN_SHIFT |
+ vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+ vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->block_pred_enable)
+ pps_val |= DSC_BLOCK_PREDICTION;
+ if (vdsc_cfg->convert_rgb)
+ pps_val |= DSC_COLOR_SPACE_CONVERSION;
+ if (vdsc_cfg->enable422)
+ pps_val |= DSC_422_ENABLE;
+ if (vdsc_cfg->vbr_enable)
+ pps_val |= DSC_VBR_ENABLE;
+ DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_1 registers */
+ pps_val = 0;
+ pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+ DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_2 registers */
+ pps_val = 0;
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+	if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_3 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+ DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+ DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_4 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+ DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+ DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_5 registers */
+ pps_val = 0;
+ pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+ DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+ DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_6 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+ DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+ DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+ DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+ DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_7 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+ DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+ DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_8 registers */
+ pps_val = 0;
+ pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+ DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+ DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_9 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+ DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_10 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+ DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+ DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+ DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+ DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ }
+
+	/* Populate PICTURE_PARAMETER_SET_16 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+ DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+ vdsc_cfg->slice_width) |
+ DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+ vdsc_cfg->slice_height);
+ DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ }
+
+ /* Populate the RC_BUF_THRESH registers */
+ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ rc_buf_thresh_dword[i / 4] |=
+ (u32)(vdsc_cfg->rc_buf_thresh[i] <<
+ BITS_PER_BYTE * (i % 4));
+ DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
+ }
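+	/*
+	 * Sketch of the packing above: the 14 8-bit thresholds land four
+	 * to a dword, LSB first, so rc_buf_thresh[0] occupies bits 7:0 of
+	 * rc_buf_thresh_dword[0], rc_buf_thresh[4] bits 7:0 of
+	 * rc_buf_thresh_dword[1], and so on.
+	 */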
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ }
+ }
+
+ /* Populate the RC_RANGE_PARAMETERS registers */
+ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ rc_range_params_dword[i / 2] |=
+ (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+ RC_BPG_OFFSET_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_max_qp <<
+ RC_MAX_QP_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_min_qp <<
+ RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+ DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
+ }
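+	/*
+	 * Sketch of the packing above: each RC range entry becomes a
+	 * 16-bit field (min_qp, max_qp and bpg_offset at their RC_*_SHIFT
+	 * positions), packed two entries per dword, so the 15 entries fill
+	 * rc_range_params_dword[0..7] with the high half-word of the last
+	 * dword left zero.
+	 */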
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ }
+ }
+}
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+ /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+ /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+ drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+ intel_dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0;
+ u32 dss_ctl2_val = 0;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ /* Enable Power wells for VDSC/joining */
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
+
+ intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+ intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+ if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ if (crtc_state->dsc_params.dsc_split) {
+ dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl1_val |= JOINER_ENABLE;
+ }
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+ if (!old_crtc_state->dsc_params.compression_enable)
+ return;
+
+ if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ if (dss_ctl1_val & JOINER_ENABLE)
+ dss_ctl1_val &= ~JOINER_ENABLE;
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+ dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+ dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+ dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+ RIGHT_BRANCH_VDSC_ENABLE);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+ /* Disable Power wells for VDSC/joining */
+ intel_display_power_put(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index ca1f78a42b17..4f41e326f3f3 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,58 +48,112 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_add(struct drm_i915_private *i915,
- i915_reg_t reg, const u32 mask, const u32 val)
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
- struct i915_workarounds *wa = &i915->workarounds;
- unsigned int start = 0, end = wa->count;
- unsigned int addr = i915_mmio_reg_offset(reg);
- struct i915_wa_reg *r;
+ wal->name = name;
+}
+
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ /* Trim unused entries. */
+ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+ struct i915_wa *list = kmemdup(wal->list,
+ wal->count * sizeof(*list),
+ GFP_KERNEL);
+
+ if (list) {
+ kfree(wal->list);
+ wal->list = list;
+ }
+ }
+
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->wa_count, wal->name);
+}
+
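+/*
+ * A sketch of the intended lifecycle, as used later in this file:
+ * wa_init_start() names the list, _wa_add() inserts entries sorted by
+ * register offset (growing the array in WA_LIST_CHUNK steps and merging
+ * duplicate registers), and wa_init_finish() trims the allocation to size.
+ */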
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ unsigned int addr = i915_mmio_reg_offset(wa->reg);
+ unsigned int start = 0, end = wal->count;
+ const unsigned int grow = WA_LIST_CHUNK;
+ struct i915_wa *wa_;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
while (start < end) {
unsigned int mid = start + (end - start) / 2;
- if (wa->reg[mid].addr < addr) {
+ if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
start = mid + 1;
- } else if (wa->reg[mid].addr > addr) {
+ } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
end = mid;
} else {
- r = &wa->reg[mid];
+ wa_ = &wal->list[mid];
- if ((mask & ~r->mask) == 0) {
+ if ((wa->mask & ~wa_->mask) == 0) {
DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, r->mask, r->value);
+ i915_mmio_reg_offset(wa_->reg),
+ wa_->mask, wa_->val);
- r->value &= ~mask;
+ wa_->val &= ~wa->mask;
}
- r->value |= val;
- r->mask |= mask;
+ wal->wa_count++;
+ wa_->val |= wa->val;
+ wa_->mask |= wa->mask;
return;
}
}
- if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
- DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, mask, val);
- return;
- }
-
- r = &wa->reg[wa->count++];
- r->addr = addr;
- r->value = val;
- r->mask = mask;
+ wal->wa_count++;
+ wa_ = &wal->list[wal->count++];
+ *wa_ = *wa;
- while (r-- > wa->reg) {
- GEM_BUG_ON(r[0].addr == r[1].addr);
- if (r[1].addr > r[0].addr)
+ while (wa_-- > wal->list) {
+ GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+ i915_mmio_reg_offset(wa_[1].reg));
+ if (i915_mmio_reg_offset(wa_[1].reg) >
+ i915_mmio_reg_offset(wa_[0].reg))
break;
- swap(r[1], r[0]);
+ swap(wa_[1], wa_[0]);
}
}
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
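+/*
+ * These masked-register macros rely on the hardware convention that the
+ * upper 16 bits of the written value select which of the lower 16 bits
+ * take effect; e.g. WA_SET_BIT_MASKED(reg, bit) records mask = bit and
+ * val = _MASKED_BIT_ENABLE(bit), i.e. (bit << 16) | bit, assuming the
+ * usual i915 _MASKED_* encoding.
+ */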
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
-
- return 0;
}
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
}
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- if (HAS_LLC(dev_priv)) {
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
+ if (!IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(i915))
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
- return 0;
}
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
+ return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
}
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(dev_priv);
+ gen9_ctx_workarounds_init(engine);
+ skl_tune_iz_hashing(engine);
}
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaToEnableHwFixForPushConstHWBug:bxt */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableEarlyEOT:cnl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
- return 0;
}
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
PUSH_CONSTANT_DEREF_DISABLE);
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN11_STATE_CACHE_REDIRECT_TO_CS);
/* Wa_2006665173:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
- return 0;
}
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
- int err = 0;
-
- dev_priv->workarounds.count = 0;
-
- if (INTEL_GEN(dev_priv) < 8)
- err = 0;
- else if (IS_BROADWELL(dev_priv))
- err = bdw_ctx_workarounds_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- err = chv_ctx_workarounds_init(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- err = skl_ctx_workarounds_init(dev_priv);
- else if (IS_BROXTON(dev_priv))
- err = bxt_ctx_workarounds_init(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_ctx_workarounds_init(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_ctx_workarounds_init(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_ctx_workarounds_init(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_ctx_workarounds_init(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- err = icl_ctx_workarounds_init(dev_priv);
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ wa_init_start(wal, "context");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_CANNONLAKE(i915))
+ cnl_ctx_workarounds_init(engine);
+ else if (IS_ICELAKE(i915))
+ icl_ctx_workarounds_init(engine);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
- if (err)
- return err;
+ MISSING_CASE(INTEL_GEN(i915));
- DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
- dev_priv->workarounds.count);
- return 0;
+ wa_init_finish(wal);
}
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
- struct i915_workarounds *w = &rq->i915->workarounds;
+ struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
u32 *cs;
- int ret, i;
+ int ret;
- if (w->count == 0)
+ if (wal->count == 0)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(rq, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = w->reg[i].addr;
- *cs++ = w->reg[i].value;
+ *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = wa->val;
}
*cs++ = MI_NOOP;
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ _wa_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
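+/*
+ * In short: wa_write() replaces the whole register (mask ~0),
+ * wa_write_or() is a read-modify-write OR (mask == val),
+ * wa_write_masked_or() updates only the bits in mask, and
+ * wa_masked_en() is the masked-register variant that also programs
+ * the enable mask in the upper 16 bits.
+ */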
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,186 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC,
- I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
- I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
-
- /* Wa_1406609255:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(GEN7_SARCHKMD,
- I915_READ(GEN7_SARCHKMD) |
- GEN7_DISABLE_DEMAND_PREFETCH |
- GEN7_DISABLE_SAMPLER_PREFETCH);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
}
-struct whitelist {
- i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
- unsigned int count;
- u32 nopid;
-};
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
- if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
return;
- w->reg[w->count++] = reg;
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+}
+
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
+ if ((cur ^ wa->val) & wa->mask) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg), cur,
+ cur & wa->mask, wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
}
-static void bdw_whitelist_build(struct whitelist *w)
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal,
+ const char *from)
{
+ struct i915_wa *wa;
+ unsigned int i;
+ bool ok = true;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+ return ok;
}
-static void chv_whitelist_build(struct whitelist *w)
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from)
{
+ return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
}
-static void gen9_whitelist_build(struct whitelist *w)
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ struct i915_wa wa = {
+ .reg = reg
+ };
+
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ _wa_add(wal, &wa);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -980,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -988,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -1001,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -1009,18 +1052,18 @@ static void glk_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
{
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
{
/* WaAllowUMDToModifyHalfSliceChicken7:icl */
whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
@@ -1029,22 +1072,21 @@ static void icl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN10_SAMPLER_MODE);
}
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
- struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
GEM_BUG_ON(engine->id != RCS);
- w->count = 0;
- w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+ wa_init_start(w, "whitelist");
if (INTEL_GEN(i915) < 8)
- return NULL;
+ return;
else if (IS_BROADWELL(i915))
- bdw_whitelist_build(w);
+ return;
else if (IS_CHERRYVIEW(i915))
- chv_whitelist_build(w);
+ return;
else if (IS_SKYLAKE(i915))
skl_whitelist_build(w);
else if (IS_BROXTON(i915))
@@ -1062,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
else
MISSING_CASE(INTEL_GEN(i915));
- return w;
+ wa_init_finish(w);
}
-static void whitelist_apply(struct intel_engine_cs *engine,
- const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ const struct i915_wa_list *wal = &engine->whitelist;
const u32 base = engine->mmio_base;
+ struct i915_wa *wa;
unsigned int i;
- if (!w)
+ if (!wal->count)
return;
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- for (i = 0; i < w->count; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(w->reg[i]));
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
- struct whitelist w;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+ /* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
- whitelist_apply(engine, whitelist_build(engine, &w));
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406609255:icl (pre-prod) */
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_DEMAND_PREFETCH |
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
#include "selftests/intel_workarounds.c"
#endif
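
The wa_list machinery above reduces every GT workaround to one read-modify-write step: clear the bits an entry owns (its mask), then OR in the required value. Judging by the call sites, wa_write_or() records the set-bits in both mask and val, while wa_write_masked_or() takes them separately. A minimal userspace model of the apply step in wa_list_apply(), using illustrative numbers rather than real i915 registers:

#include <stdint.h>
#include <stdio.h>

struct wa_model { uint32_t reg, mask, val; };

/* wa_write_or(reg, set)         -> mask = set, val = set (assumed)
 * wa_write_masked_or(reg, m, v) -> mask = m,   val = v   (assumed) */
static uint32_t wa_apply_one(uint32_t cur, const struct wa_model *wa)
{
	cur &= ~wa->mask;	/* clear the bits this entry owns */
	cur |= wa->val;		/* then impose the required value */
	return cur;
}

int main(void)
{
	/* illustrative values only, not a real i915 register */
	const struct wa_model wa = { .reg = 0x0, .mask = 0x1f, .val = 0x11 };

	printf("0x%08x -> 0x%08x\n", 0x2au, (unsigned)wa_apply_one(0x2a, &wa));
	return 0;
}

The kernel version performs the same arithmetic, just under forcewake and uncore.lock via I915_READ_FW/I915_WRITE_FW.
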
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..7c734714b05e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,39 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+#include <linux/slab.h>
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
#endif
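
wa_verify() above flags a workaround as lost when (cur ^ wa->val) & wa->mask is nonzero: the XOR lights up exactly the bits that differ from the expected value, and the mask discards everything the entry does not own. A two-assertion sketch of that predicate, with made-up values:

#include <assert.h>
#include <stdint.h>

/* Lost iff any bit inside the mask differs from the expected value. */
static int wa_still_applied(uint32_t cur, uint32_t mask, uint32_t val)
{
	return ((cur ^ val) & mask) == 0;
}

int main(void)
{
	/* the entry owns bits [4:0] and expects them to read 0x11 */
	assert(wa_still_applied(0xffffff31, 0x1f, 0x11));  /* noise outside mask is fine */
	assert(!wa_still_applied(0x00000010, 0x1f, 0x11)); /* owned bit 0 flipped: lost  */
	return 0;
}
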
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000000..208a966da8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
+ while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+
+ for_each_engine(engine, i915, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+}
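
igt_global_reset_lock() serializes a selftest against concurrent resets by atomically claiming I915_RESET_BACKOFF and then every per-engine bit, sleeping until the current holder releases each one. A rough userspace analogue of that claim/release handshake using C11 atomics; sched_yield() stands in for the kernel's wait-queue sleep and the bit layout is illustrative:

#include <sched.h>
#include <stdatomic.h>

#define RESET_BACKOFF	0
#define RESET_ENGINE	1	/* + engine id */

/* test_and_set_bit + wait loop: keep trying to claim the bit,
 * backing off while another path still holds it. */
static void claim_bit(atomic_ulong *flags, unsigned int bit)
{
	unsigned long mask = 1ul << bit;

	while (atomic_fetch_or(flags, mask) & mask)
		sched_yield();	/* the kernel sleeps on reset_queue instead */
}

static void release_bit(atomic_ulong *flags, unsigned int bit)
{
	atomic_fetch_and(flags, ~(1ul << bit));
	/* the kernel follows this with wake_up_all(&gpu_error.reset_queue) */
}

int main(void)
{
	atomic_ulong flags = 0;

	claim_bit(&flags, RESET_BACKOFF);	/* block new resets first */
	claim_bit(&flags, RESET_ENGINE + 0);	/* then each engine bit   */
	release_bit(&flags, RESET_ENGINE + 0);
	release_bit(&flags, RESET_BACKOFF);
	return 0;
}
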
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000000..5f0234d045d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000000..8cd34f6e6859
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+ unsigned int mode;
+ void *vaddr;
+ int err;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ memset(spin, 0, sizeof(*spin));
+ spin->i915 = i915;
+
+ spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+
+ spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ mode = i915_coherent_map_type(i915);
+ vaddr = i915_gem_object_pin_map(spin->obj, mode);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ spin->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(spin->hws);
+err_obj:
+ i915_gem_object_put(spin->obj);
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+ struct i915_request *rq,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ struct i915_vma *hws, *vma;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(spin->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(spin->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = spin->batch;
+
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ i915_gem_chipset_flush(spin->i915);
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(spin, rq, arbitration_command);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+ *spin->batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+ igt_spinner_end(spin);
+
+ i915_gem_object_unpin_map(spin->obj);
+ i915_gem_object_put(spin->obj);
+
+ i915_gem_object_unpin_map(spin->hws);
+ i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+ if (!wait_event_timeout(rq->execute,
+ READ_ONCE(rq->global_seqno),
+ msecs_to_jiffies(10)))
+ return false;
+
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 1000));
+}
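
seqno_offset()/hws_address() above give each fence context its own u32 slot in a single heartbeat (HWS) page: offset_in_page(x) is x & (PAGE_SIZE - 1), which for a power-of-two page size equals x % PAGE_SIZE, so slots wrap once more than PAGE_SIZE / 4 = 1024 contexts are in flight. A standalone restatement of the slot arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Byte offset of a context's u32 seqno slot within the HWS page. */
static unsigned int seqno_slot(uint64_t fence_context)
{
	return (unsigned int)((sizeof(uint32_t) * fence_context) & (PAGE_SIZE - 1));
}

int main(void)
{
	printf("ctx 2    -> byte %u\n", seqno_slot(2));    /* 8               */
	printf("ctx 1023 -> byte %u\n", seqno_slot(1023)); /* 4092, last slot */
	printf("ctx 1024 -> byte %u\n", seqno_slot(1024)); /* wraps back to 0 */
	return 0;
}
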
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000000..391777c76dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *batch;
+ void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index defe671130ab..40efbed611de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_reset.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
+ struct igt_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ timeout = 0;
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ timeout = i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ timeout = -EIO;
+
i915_request_put(rq);
if (timeout < 0) {
@@ -348,40 +355,6 @@ unlock:
return err;
}
-static void global_reset_lock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
-
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
-
- for_each_engine(engine, i915, id) {
- while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
- TASK_UNINTERRUPTIBLE);
- }
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
static int igt_global_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
/* Check that we detect a stuck waiter and issue a reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
/* Check that we can recover an unbind stuck on a hanging request */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1186,7 +1159,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1266,7 +1239,7 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1397,7 +1370,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 94fc0e5c8766..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,216 +6,18 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_spinner.h"
#include "i915_random.h"
#include "mock_context.h"
-struct spinner {
- struct drm_i915_private *i915;
- struct drm_i915_gem_object *hws;
- struct drm_i915_gem_object *obj;
- u32 *batch;
- void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
- unsigned int mode;
- void *vaddr;
- int err;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
-
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->hws)) {
- err = PTR_ERR(spin->hws);
- goto err;
- }
-
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->obj)) {
- err = PTR_ERR(spin->obj);
- goto err_hws;
- }
-
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
- spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
- mode = i915_coherent_map_type(i915);
- vaddr = i915_gem_object_pin_map(spin->obj, mode);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_unpin_hws;
- }
- spin->batch = vaddr;
-
- return 0;
-
-err_unpin_hws:
- i915_gem_object_unpin_map(spin->hws);
-err_obj:
- i915_gem_object_put(spin->obj);
-err_hws:
- i915_gem_object_put(spin->hws);
-err:
- return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
- return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
- const struct i915_request *rq)
-{
- return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
-{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
- struct i915_vma *hws, *vma;
- u32 *batch;
- int err;
-
- vma = i915_vma_instance(spin->obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- hws = i915_vma_instance(spin->hws, vm, NULL);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- err = i915_vma_pin(hws, 0, 0, PIN_USER);
- if (err)
- goto unpin_vma;
-
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- err = i915_vma_move_to_active(hws, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
-
- batch = spin->batch;
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
- *batch++ = rq->fence.seqno;
-
- *batch++ = arbitration_command;
-
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
- *batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
- i915_gem_chipset_flush(spin->i915);
-
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
- i915_vma_unpin(hws);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
- u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
- return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
- *spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
- spinner_end(spin);
-
- i915_gem_object_unpin_map(spin->obj);
- i915_gem_object_put(spin->obj);
-
- i915_gem_object_unpin_map(spin->hws);
- i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
- if (!wait_event_timeout(rq->execute,
- READ_ONCE(rq->global_seqno),
- msecs_to_jiffies(10)))
- return false;
-
- return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct spinner spin;
+ struct igt_spinner spin;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -224,7 +26,7 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, i915))
goto err_unlock;
ctx = kernel_context(i915);
@@ -234,14 +36,14 @@ static int live_sanitycheck(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin, rq)) {
+ if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -249,7 +51,7 @@ static int live_sanitycheck(void *arg)
goto err_ctx;
}
- spinner_end(&spin);
+ igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx;
@@ -260,7 +62,7 @@ static int live_sanitycheck(void *arg)
err_ctx:
kernel_context_close(ctx);
err_spin:
- spinner_fini(&spin);
+ igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -272,7 +74,7 @@ static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -283,10 +85,10 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -304,15 +106,15 @@ static int live_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -320,16 +122,16 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -337,8 +139,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -351,9 +153,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -365,7 +167,7 @@ static int live_late_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
@@ -377,10 +179,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -394,28 +196,29 @@ static int live_late_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (wait_for_spinner(&spin_hi, rq)) {
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
@@ -423,14 +226,14 @@ static int live_late_preempt(void *arg)
attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -443,9 +246,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -453,8 +256,8 @@ err_unlock:
return err;
err_wedged:
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
@@ -464,7 +267,7 @@ static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -478,10 +281,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -500,15 +303,15 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -516,10 +319,10 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
@@ -544,7 +347,7 @@ static int live_preempt_hang(void *arg)
engine->execlists.preempt_hang.inject_hang = false;
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -552,8 +355,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -566,9 +369,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923d2f38..67017d5175b8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -91,17 +94,23 @@ err_obj:
return ERR_PTR(err);
}
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
- return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+ i915_reg_t reg = i < engine->whitelist.count ?
+ engine->whitelist.list[i].reg :
+ RING_NOPID(engine->mmio_base);
+
+ return i915_mmio_reg_offset(reg);
}
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
{
unsigned int i;
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = results[i];
pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
}
}
-static int check_whitelist(const struct whitelist *w,
- struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
}
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = vaddr[i];
if (expected != actual) {
- print_results(w, vaddr);
+ print_results(engine, vaddr);
pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
i, expected, actual);
@@ -159,66 +167,107 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+ set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+ i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, NULL);
+ return i915_reset_engine(engine, "live_workarounds");
}
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
+
+ if (spin)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+
intel_runtime_pm_put(engine->i915);
kernel_context_close(ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+
+ if (IS_ERR(rq)) {
+ spin = NULL;
+ err = PTR_ERR(rq);
+ goto err;
+ }
i915_request_add(rq);
- return 0;
+ if (spin && !igt_wait_for_spinner(spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ err = -ETIMEDOUT;
+ }
+
+err:
+ if (err && spin)
+ igt_spinner_end(spin);
+
+ return err;
}
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
- const struct whitelist *w,
const char *name)
{
+ struct drm_i915_private *i915 = engine->i915;
+ bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
+ struct igt_spinner spin;
int err;
- ctx = kernel_context(engine->i915);
+ pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, name);
+
+ if (want_spin) {
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
+ }
+
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out;
}
- err = switch_to_scratch_context(engine);
+ err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
if (err)
goto out;
+ intel_runtime_pm_get(i915);
err = reset(engine);
+ intel_runtime_pm_put(i915);
+
+ if (want_spin) {
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ }
+
if (err) {
pr_err("%s reset failed\n", name);
goto out;
}
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
kernel_context_close(ctx);
- ctx = kernel_context(engine->i915);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
- struct i915_gpu_error *error = &i915->gpu_error;
- struct whitelist w;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
- if (!engine)
- return 0;
-
- if (!whitelist_build(engine, &w))
+ if (!engine || engine->whitelist.count == 0)
return 0;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
- set_bit(I915_RESET_BACKOFF, &error->flags);
- set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+ igt_global_reset_lock(i915);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
- do_engine_reset, &w,
+ do_engine_reset,
"engine");
if (err)
goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
if (intel_has_gpu_reset(i915)) {
err = check_whitelist_across_reset(engine,
- do_device_reset, &w,
+ do_device_reset,
"device");
if (err)
goto out;
}
out:
- clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
- clear_bit(I915_RESET_BACKOFF, &error->flags);
+ igt_global_reset_unlock(i915);
return err;
}
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool ok = true;
+
+ ok &= intel_gt_verify_workarounds(i915, str);
+
+ for_each_engine(engine, i915, id)
+ ok &= intel_engine_verify_workarounds(engine, str);
+
+ return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool ok;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ pr_info("Verifying after GPU reset...\n");
+
+ igt_global_reset_lock(i915);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok)
+ goto out;
+
+ intel_runtime_pm_get(i915);
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+ igt_global_reset_unlock(i915);
+
+ return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ struct i915_request *rq;
+ int ret = 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ igt_global_reset_lock(i915);
+
+ for_each_engine(engine, i915, id) {
+ bool ok;
+
+ pr_info("Verifying after %s reset...\n", engine->name);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ intel_runtime_pm_get(i915);
+ i915_reset_engine(engine, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after idle reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, i915);
+ if (ret)
+ goto err;
+
+ intel_runtime_pm_get(i915);
+
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ i915_reset_engine(engine, "live_workarounds");
+
+ intel_runtime_pm_put(i915);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ ok = verify_gt_engine_wa(i915, "after busy reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+ }
+
+err:
+ igt_global_reset_unlock(i915);
+ kernel_context_close(ctx);
+
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+
+ return ret;
+}
+
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_gpu_reset_gt_engine_workarounds),
+ SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0a7252aecfa5..bb71db63c99c 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -334,7 +334,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
}
EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
-int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
+int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int err = 0;
int i;
@@ -493,7 +493,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
return ret;
}
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int i;
for (i = 0; ptr[i]; i++)
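
The sysfs hunk is pure const-correctness: the extra const lets callers keep an attribute table entirely read-only and still pass it without a cast. A standalone sketch of why the parameter type matters (the struct is pared down for illustration):

#include <stdio.h>

struct attribute { const char *name; };

/* old prototype: cannot accept a table of const pointers */
static void create_files_old(const struct attribute **p) { (void)p; }

/* new prototype: the extra const admits fully read-only tables */
static void create_files_new(const struct attribute *const *p)
{
	for (; *p; p++)
		printf("%s\n", (*p)->name);
}

static const struct attribute a1 = { "foo" }, a2 = { "bar" };
static const struct attribute *const table[] = { &a1, &a2, NULL };

int main(void)
{
	(void)create_files_old;	/* referenced only to keep -Wunused quiet */
	/* create_files_old(table); -- warns: incompatible pointer type */
	create_files_new(table);   /* fine with the const-ified prototype */
	return 0;
}
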
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 3314e91f6eb3..5736c942c85b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1123,7 +1123,8 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp);
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
-u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
static inline bool
drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
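
The drm_dp_helper change swaps the single max-color-depth query for one that reports every input bits-per-component the DSC sink advertises. A hedged caller sketch, assuming the helper fills dsc_bpc[] and returns the number of valid entries (0 if none); pick_max_dsc_bpc() is a hypothetical consumer, not part of this series:

/* Kernel-context sketch: choose the deepest input bpc the sink supports. */
static u8 pick_max_dsc_bpc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
	u8 dsc_bpc[3] = {};
	u8 max_bpc = 0;
	int i, num;

	num = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
	for (i = 0; i < num; i++)
		max_bpc = max(max_bpc, dsc_bpc[i]);

	return max_bpc;
}
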
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 000000000000..d03f1b83421a
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES 15
+#define DSC_MUX_WORD_SIZE_8_10_BPC 48
+#define DSC_MUX_WORD_SIZE_12_BPC 64
+#define DSC_RC_PIXELS_PER_GROUP 3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095
+#define DSC_RANGE_BPG_OFFSET_MASK 0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST 8192
+#define DSC_RC_EDGE_FACTOR_CONST 6
+#define DSC_RC_TGT_OFFSET_HI_CONST 3
+#define DSC_RC_TGT_OFFSET_LO_CONST 3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT 4
+#define DSC_PPS_BPC_SHIFT 4
+#define DSC_PPS_MSB_SHIFT 8
+#define DSC_PPS_LSB_MASK (0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT 2
+#define DSC_PPS_SIMPLE422_SHIFT 3
+#define DSC_PPS_CONVERT_RGB_SHIFT 4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
+#define DSC_PPS_NATIVE_420_SHIFT 1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+ /* Min Quantization Parameters allowed for this range */
+ u8 range_min_qp;
+ /* Max Quantization Parameters allowed for this range */
+ u8 range_max_qp;
+ /* Bits/group offset to apply to target for this group */
+ u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+ /* Bits / component for previous reconstructed line buffer */
+ u8 line_buf_depth;
+ /* Bits per component to code (must be 8, 10, or 12) */
+ u8 bits_per_component;
+ /*
+ * Flag indicating to do RGB to YCoCg conversion
+ * and back (should be 1 for RGB input)
+ */
+ bool convert_rgb;
+ u8 slice_count;
+ /* Slice Width */
+ u16 slice_width;
+ /* Slice Height */
+ u16 slice_height;
+ /*
+ * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+ * outside of DSC encode/decode algorithm)
+ */
+ bool enable422;
+ /* Picture Width */
+ u16 pic_width;
+ /* Picture Height */
+ u16 pic_height;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_high;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_low;
+ /* Bits/pixel target << 4 (i.e., 4 fractional bits) */
+ u16 bits_per_pixel;
+ /*
+ * Factor to determine if an edge is present based
+ * on the bits produced
+ */
+ u8 rc_edge_factor;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit1;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit0;
+ /* Number of pixels to delay the initial transmission */
+ u16 initial_xmit_delay;
+ /* Number of pixels to delay the VLD on the decoder, not including SSM */
+ u16 initial_dec_delay;
+ /* Block prediction enable */
+ bool block_pred_enable;
+ /* Bits/group offset to use for first line of the slice */
+ u8 first_line_bpg_offset;
+ /* Value to use for RC model offset at slice start */
+ u16 initial_offset;
+ /* Thresholds defining each of the buffer ranges */
+ u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /* Parameters for each of the RC ranges */
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+ /* Total size of RC model */
+ u16 rc_model_size;
+ /* Minimum QP where flatness information is sent */
+ u8 flatness_min_qp;
+ /* Maximum QP where flatness information is sent */
+ u8 flatness_max_qp;
+ /* Initial value for scale factor */
+ u8 initial_scale_value;
+ /* Decrement scale factor every scale_decrement_interval groups */
+ u16 scale_decrement_interval;
+ /* Increment scale factor every scale_increment_interval groups */
+ u16 scale_increment_interval;
+ /* Non-first line BPG offset to use */
+ u16 nfl_bpg_offset;
+ /* BPG offset used to enforce slice bit rate compliance */
+ u16 slice_bpg_offset;
+ /* Final RC linear transformation offset value */
+ u16 final_offset;
+ /* Enable on-off VBR (i.e., disable stuffing bits) */
+ bool vbr_enable;
+ /* Mux word size (in bits) for SSM mode */
+ u8 mux_word_size;
+ /*
+ * The (max) size in bytes of the "chunks" that are
+ * used in slice multiplexing
+ */
+ u16 slice_chunk_size;
+ /* Rate Control buffer size in bits */
+ u16 rc_bits;
+ /* DSC Minor Version */
+ u8 dsc_version_minor;
+ /* DSC Major version */
+ u8 dsc_version_major;
+ /* Native 4:2:2 support */
+ bool native_422;
+ /* Native 4:2:0 support */
+ bool native_420;
+ /* Additional bits/group for second line of slice for native 4:2:0 */
+ u8 second_line_bpg_offset;
+ /* Number of bits deallocated for each group not in second line of slice */
+ u16 nsl_bpg_offset;
+ /* Offset adjustment for second line in Native 4:2:0 mode */
+ u16 second_line_offset_adj;
+};
+
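
As noted on bits_per_pixel above, the target bpp is stored with four fractional bits (the PPS carries the same value as the 10-bit bits_per_pixel[9:0] element), so a 12.5 bpp target encodes as 200. A tiny conversion sketch; bpp_to_u6_4() is an illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

/* Pack integer bpp plus sixteenths into the 4-fractional-bit form. */
static uint16_t bpp_to_u6_4(uint32_t bpp_int, uint32_t sixteenths)
{
	return (uint16_t)((bpp_int << 4) | (sixteenths & 0xf));
}

int main(void)
{
	printf("8 bpp    -> %u\n", bpp_to_u6_4(8, 0));  /* 128              */
	printf("12.5 bpp -> %u\n", bpp_to_u6_4(12, 8)); /* 8/16 = .5 -> 200 */
	return 0;
}
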
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines picture parameter set (PPS) which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2.
+ * The PPS fields that span more than one byte should be stored in big-endian
+ * format.
+ */
+struct drm_dsc_picture_parameter_set {
+ /**
+ * @dsc_version:
+ * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+ * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+ */
+ u8 dsc_version;
+ /**
+ * @pps_identifier:
+ * PPS1[7:0] - Application specific identifier that can be
+ * used to differentiate between different PPS tables.
+ */
+ u8 pps_identifier;
+ /**
+ * @pps_reserved:
+ * PPS2[7:0]- RESERVED Byte
+ */
+ u8 pps_reserved;
+ /**
+ * @pps_3:
+ * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+	 * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+	 * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+	 * 0xE - 14 bits for DSC 1.2, 0xF - 15 bits for DSC 1.2.)
+ * PPS3[7:4] - bits_per_component: Bits per component for the original
+ * pixels of the encoded picture.
+ * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+ * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+	 * allowed only when dsc_version_minor = 0x2)
+ */
+ u8 pps_3;
+ /**
+ * @pps_4:
+	 * PPS4[1:0] - These are the 2 most significant bits of the
+	 * compressed BPP bits_per_pixel[9:0] syntax element.
+ * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+ * PPS4[3] - simple_422: Indicates if decoder drops samples to
+ * reconstruct the 4:2:2 picture.
+ * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is
+ * active.
+	 * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+	 * groups in the picture
+	 * PPS4[7:6] - Reserved bits
+ */
+ u8 pps_4;
+ /**
+ * @bits_per_pixel_low:
+	 * PPS5[7:0] - This indicates the 8 least significant bits of
+ * the compressed BPP bits_per_pixel[9:0] element.
+ */
+ u8 bits_per_pixel_low;
+ /**
+ * @pic_height:
+	 * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows
+ * within the raster.
+ */
+ __be16 pic_height;
+ /**
+ * @pic_width:
+ * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+ * the raster.
+ */
+ __be16 pic_width;
+ /**
+ * @slice_height:
+ * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+ */
+ __be16 slice_height;
+ /**
+ * @slice_width:
+ * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+ */
+ __be16 slice_width;
+ /**
+ * @chunk_size:
+ * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+ * that are used for slice multiplexing.
+ */
+ __be16 chunk_size;
+ /**
+ * @initial_xmit_delay_high:
+ * PPS16[1:0] - Most Significant two bits of initial transmission delay.
+ * It specifies the number of pixel times that the encoder waits before
+ * transmitting data from its rate buffer.
+ * PPS16[7:2] - Reserved
+ */
+ u8 initial_xmit_delay_high;
+ /**
+ * @initial_xmit_delay_low:
+ * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+ */
+ u8 initial_xmit_delay_low;
+ /**
+ * @initial_dec_delay:
+ *
+ * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+ * of pixel times that the decoder accumulates data in its rate buffer
+ * before starting to decode and output pixels.
+ */
+ __be16 initial_dec_delay;
+ /**
+ * @pps20_reserved:
+ *
+ * PPS20[7:0] - Reserved
+ */
+ u8 pps20_reserved;
+ /**
+ * @initial_scale_value:
+ * PPS21[5:0] - Initial rcXformScale factor used at beginning
+ * of a slice.
+ * PPS21[7:6] - Reserved
+ */
+ u8 initial_scale_value;
+ /**
+ * @scale_increment_interval:
+ * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+ * the rcXformScale factor at end of a slice.
+ */
+ __be16 scale_increment_interval;
+ /**
+ * @scale_decrement_interval_high:
+ * PPS24[3:0] - Higher 4 bits indicating number of group times between
+ * decrementing the rcXformScale factor at beginning of a slice.
+ * PPS24[7:4] - Reserved
+ */
+ u8 scale_decrement_interval_high;
+ /**
+ * @scale_decrement_interval_low:
+ * PPS25[7:0] - Lower 8 bits of scale decrement interval
+ */
+ u8 scale_decrement_interval_low;
+ /**
+ * @pps26_reserved:
+ * PPS26[7:0]
+ */
+ u8 pps26_reserved;
+ /**
+ * @first_line_bpg_offset:
+ * PPS27[4:0] - Number of additional bits that are allocated
+ * for each group on first line of a slice.
+ * PPS27[7:5] - Reserved
+ */
+ u8 first_line_bpg_offset;
+ /**
+ * @nfl_bpg_offset:
+ * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+ * deallocated for each group for groups after the first line of slice.
+ */
+ __be16 nfl_bpg_offset;
+ /**
+ * @slice_bpg_offset:
+ * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+ * group to enforce the slice constraint.
+ */
+ __be16 slice_bpg_offset;
+ /**
+ * @initial_offset:
+ * PPS32,33[7:0] - Initial value for rcXformOffset
+ */
+ __be16 initial_offset;
+ /**
+ * @final_offset:
+ * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+ */
+ __be16 final_offset;
+ /**
+ * @flatness_min_qp:
+ * PPS36[4:0] - Minimum QP at which flatness is signaled and
+ * flatness QP adjustment is made.
+ * PPS36[7:5] - Reserved
+ */
+ u8 flatness_min_qp;
+ /**
+ * @flatness_max_qp:
+	 * PPS37[4:0] - Maximum QP at which flatness is signaled and
+	 * the flatness QP adjustment is made.
+ * PPS37[7:5] - Reserved
+ */
+ u8 flatness_max_qp;
+ /**
+ * @rc_model_size:
+ * PPS38,39[7:0] - Number of bits within RC Model.
+ */
+ __be16 rc_model_size;
+ /**
+ * @rc_edge_factor:
+	 * PPS40[3:0] - Ratio of current activity vs. previous
+ * activity to determine presence of edge.
+ * PPS40[7:4] - Reserved
+ */
+ u8 rc_edge_factor;
+ /**
+ * @rc_quant_incr_limit0:
+ * PPS41[4:0] - QP threshold used in short term RC
+ * PPS41[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit0;
+ /**
+ * @rc_quant_incr_limit1:
+ * PPS42[4:0] - QP threshold used in short term RC
+ * PPS42[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit1;
+ /**
+ * @rc_tgt_offset:
+ * PPS43[3:0] - Lower end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+	 * PPS43[7:4] - Upper end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+ */
+ u8 rc_tgt_offset;
+ /**
+ * @rc_buf_thresh:
+ * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+ * the 15 ranges defined by 14 thresholds.
+ */
+ u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /**
+ * @rc_range_parameters:
+ * PPS58[7:0] - PPS87[7:0]
+ * Parameters that correspond to each of the 15 ranges.
+ */
+ __be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+ /**
+ * @native_422_420:
+ * PPS88[0] - 0 = Native 4:2:2 not used
+ * 1 = Native 4:2:2 used
+	 * PPS88[1] - 0 = Native 4:2:0 not used
+ * 1 = Native 4:2:0 used
+ * PPS88[7:2] - Reserved 6 bits
+ */
+ u8 native_422_420;
+ /**
+ * @second_line_bpg_offset:
+ * PPS89[4:0] - Additional bits/group budget for the
+ * second line of a slice in Native 4:2:0 mode.
+ * Set to 0 if DSC minor version is 1 or native420 is 0.
+ * PPS89[7:5] - Reserved
+ */
+ u8 second_line_bpg_offset;
+ /**
+ * @nsl_bpg_offset:
+ * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+ * for each group that is not in the second line of a slice.
+ */
+ __be16 nsl_bpg_offset;
+ /**
+ * @second_line_offset_adj:
+ * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+ * line in Native 4:2:0 mode.
+ */
+ __be16 second_line_offset_adj;
+ /**
+ * @pps_long_94_reserved:
+ * PPS 94, 95, 96, 97 - Reserved
+ */
+ u32 pps_long_94_reserved;
+ /**
+ * @pps_long_98_reserved:
+ * PPS 98, 99, 100, 101 - Reserved
+ */
+ u32 pps_long_98_reserved;
+ /**
+ * @pps_long_102_reserved:
+ * PPS 102, 103, 104, 105 - Reserved
+ */
+ u32 pps_long_102_reserved;
+ /**
+ * @pps_long_106_reserved:
+ * PPS 106, 107, 108, 109 - reserved
+ */
+ u32 pps_long_106_reserved;
+ /**
+ * @pps_long_110_reserved:
+ * PPS 110, 111, 112, 113 - reserved
+ */
+ u32 pps_long_110_reserved;
+ /**
+ * @pps_long_114_reserved:
+ * PPS 114 - 117 - reserved
+ */
+ u32 pps_long_114_reserved;
+ /**
+ * @pps_long_118_reserved:
+ * PPS 118 - 121 - reserved
+ */
+ u32 pps_long_118_reserved;
+ /**
+ * @pps_long_122_reserved:
+ * PPS 122- 125 - reserved
+ */
+ u32 pps_long_122_reserved;
+ /**
+ * @pps_short_126_reserved:
+ * PPS 126, 127 - reserved
+ */
+ __be16 pps_short_126_reserved;
+} __packed;
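+
+/*
+ * Endianness sketch (illustrative only, not part of the original patch):
+ * multi-byte PPS fields above are stored big endian, so an encoder filling
+ * this structure from a struct drm_dsc_config must byte-swap on little
+ * endian hosts, e.g.:
+ *
+ *	pps->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
+ *	pps->initial_dec_delay = cpu_to_be16(dsc_cfg->initial_dec_delay);
+ *
+ * ("pps" and "dsc_cfg" are assumed local pointer names.)
+ */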
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the
+ * Picture Parameter Set metadata before enabling VESA Display Stream
+ * Compression. It is based on the DP Secondary Data Packet structure and
+ * consists of the SDP header as defined in drm_dp_helper.h and the PPS
+ * payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+ struct dp_sdp_header pps_header;
+ struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
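+
+/*
+ * Usage sketch (illustrative only): a DP source driver that has already
+ * computed a struct drm_dsc_config would typically build the PPS SDP as:
+ *
+ *	struct drm_dsc_pps_infoframe pps_sdp;
+ *
+ *	drm_dsc_dp_pps_header_init(&pps_sdp);
+ *	drm_dsc_pps_infoframe_pack(&pps_sdp, &dsc_cfg);
+ *
+ * and then transmit &pps_sdp to the sink via the hardware's SDP mechanism.
+ */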
+
+#endif /* _DRM_DSC_H_ */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa337de..786816cf4aa5 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr);
+ const struct attribute * const *attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
}
static inline int sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
return 0;
}
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
}
static inline void sysfs_remove_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
}
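
The constified prototypes let callers keep attribute pointer arrays in
rodata. A minimal sketch with hypothetical names (dev_attr_foo stands in
for a device attribute declared elsewhere):

	static const struct attribute *const my_attrs[] = {
		&dev_attr_foo.attr,
		NULL,		/* sysfs_create_files() expects a NULL-terminated array */
	};

	retval = sysfs_create_files(kobj, my_attrs);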