author     Ville Syrjälä <ville.syrjala@linux.intel.com>  2022-02-18 17:47:08 +0200
committer  Ville Syrjälä <ville.syrjala@linux.intel.com>  2022-02-18 17:47:08 +0200
commit     b0209a73d29dc65cbc47eefca8af359cd53eb6ba (patch)
tree       3af2d233df17c6a63b4d2a8ebe74aee8a71b5443
parent     7ec6a8dab8b257943b7711e8e87b8a8830b4f59e (diff)
parent     48bb92d91ae9331138656f369e66b5cab8f0fa7e (diff)
Merge remote-tracking branch 'drm_intel_push/drm-intel-next' into drm-tip
# Conflicts:
#	drivers/gpu/drm/i915/i915_module.c
-rw-r--r--  drivers/gpu/drm/dp/drm_dp.c | 83
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 271
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 217
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 83
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 969
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 93
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 307
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_sseu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_regs.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 2626
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 301
-rw-r--r--  drivers/gpu/drm/i915/i915_file_private.h | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_mitigations.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_module.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 280
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_mchbar_regs.h | 228
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 322
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2
-rw-r--r--  include/drm/dp/drm_dp_helper.h | 24
136 files changed, 3981 insertions, 3054 deletions
diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c
index c43577c8ac4d..e159b81800d4 100644
--- a/drivers/gpu/drm/dp/drm_dp.c
+++ b/drivers/gpu/drm/dp/drm_dp.c
@@ -145,6 +145,69 @@ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align, lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+ if (!(lane_align & DP_INTERLANE_ALIGN_DONE))
+ return false;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if (!(lane_status & DP_LANE_CHANNEL_EQ_DONE))
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_lane_channel_eq_done);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_status;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if (!(lane_status & DP_LANE_SYMBOL_LOCKED))
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_lane_symbol_locked);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return status & DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_eq_interlane_align_done);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return status & DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_cds_interlane_align_done);
+
+/* DP 2.0 errata for 128b/132b */
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return status & DP_128B132B_LT_FAILED;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_link_training_failed);
+
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
unsigned int lane)
{
@@ -282,6 +345,26 @@ int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIV
}
EXPORT_SYMBOL(drm_dp_read_channel_eq_delay);
+/* Per DP 2.0 Errata */
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
+{
+ int unit;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ drm_err(aux->drm_dev, "%s: failed rd interval read\n",
+ aux->name);
+ /* default to max */
+ val = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
+ }
+
+ unit = (val & DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT) ? 1 : 2;
+ val &= DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
+
+ return (val + 1) * unit * 1000;
+}
+EXPORT_SYMBOL(drm_dp_128b132b_read_aux_rd_interval);
+
void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
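
[Editor's note, not part of the patch: the new drm_dp_128b132b_read_aux_rd_interval() above returns microseconds as (count + 1) * unit_ms * 1000, where the DPCD byte packs a 7-bit count plus a unit-select bit. The standalone decode below is a hypothetical illustration under the assumption that the unit bit is bit 7 and the count mask is 0x7f; the function name and literal masks are examples, not kernel symbols.]

/* Hypothetical standalone decode mirroring the helper above, for clarity. */
static int example_128b132b_rd_interval_us(u8 val)
{
	/* Assumed: bit 7 selects 1 ms units, otherwise 2 ms units. */
	int unit_ms = (val & 0x80) ? 1 : 2;

	/* Assumed: low 7 bits hold the interval count. */
	return ((val & 0x7f) + 1) * unit_ms * 1000;
}

/*
 * example_128b132b_rd_interval_us(0x05) == 12000  us  (6 * 2 ms)
 * example_128b132b_rd_interval_us(0x85) ==  6000  us  (6 * 1 ms)
 * example_128b132b_rd_interval_us(0x7f) == 256000 us  (the read-failure default)
 */
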
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 139e0e7eba94..7686f46e42a4 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,7 @@
# will most likely get a sudden build breakage... Hopefully we will fix
# new warnings before CI updates!
subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += -Wno-format-security
subdir-ccflags-y += -Wno-unused-parameter
subdir-ccflags-y += -Wno-type-limits
subdir-ccflags-y += -Wno-missing-field-initializers
@@ -197,6 +198,7 @@ i915-y += gt/uc/intel_uc.o \
# modesetting core code
i915-y += \
+ display/hsw_ips.o \
display/intel_atomic.o \
display/intel_atomic_plane.o \
display/intel_audio.o \
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
new file mode 100644
index 000000000000..38014e0cc9ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "hsw_ips.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_pcode.h"
+
+static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!crtc_state->ips_enabled)
+ return;
+
+ /*
+ * We can only enable IPS after we enable a plane and wait for a vblank
+ * This function is called from post_plane_update, which is run after
+ * a vblank wait.
+ */
+ drm_WARN_ON(&i915->drm,
+ !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+
+ if (IS_BROADWELL(i915)) {
+ drm_WARN_ON(&i915->drm,
+ snb_pcode_write(i915, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
+ /*
+ * Quoting Art Runyan: "its not safe to expect any particular
+ * value in IPS_CTL bit 31 after enabling IPS through the
+ * mailbox." Moreover, the mailbox may return a bogus state,
+ * so we need to just enable it and continue on.
+ */
+ } else {
+ intel_de_write(i915, IPS_CTL, IPS_ENABLE);
+ /*
+ * The bit only becomes 1 in the next vblank, so this wait here
+ * is essentially intel_wait_for_vblank. If we don't have this
+ * and don't wait for vblanks until the end of crtc_enable, then
+ * the HW state readout code will complain that the expected
+ * IPS_CTL value is not the one we read.
+ */
+ if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50))
+ drm_err(&i915->drm,
+ "Timed out waiting for IPS enable\n");
+ }
+}
+
+bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ bool need_vblank_wait = false;
+
+ if (!crtc_state->ips_enabled)
+ return need_vblank_wait;
+
+ if (IS_BROADWELL(i915)) {
+ drm_WARN_ON(&i915->drm,
+ snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
+ /*
+ * Wait for PCODE to finish disabling IPS. The BSpec specified
+ * 42ms timeout value leads to occasional timeouts so use 100ms
+ * instead.
+ */
+ if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100))
+ drm_err(&i915->drm,
+ "Timed out waiting for IPS disable\n");
+ } else {
+ intel_de_write(i915, IPS_CTL, 0);
+ intel_de_posting_read(i915, IPS_CTL);
+ }
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ need_vblank_wait = true;
+
+ return need_vblank_wait;
+}
+
+static bool hsw_ips_need_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!old_crtc_state->ips_enabled)
+ return false;
+
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * Workaround : Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Disable IPS before we program the LUT.
+ */
+ if (IS_HASWELL(i915) &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ return !new_crtc_state->ips_enabled;
+}
+
+bool hsw_ips_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!hsw_ips_need_disable(state, crtc))
+ return false;
+
+ return hsw_ips_disable(old_crtc_state);
+}
+
+static bool hsw_ips_need_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->ips_enabled)
+ return false;
+
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * Workaround : Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Re-enable IPS after the LUT has been programmed.
+ */
+ if (IS_HASWELL(i915) &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ /*
+ * We can't read out IPS on broadwell, assume the worst and
+ * forcibly enable IPS on the first fastset.
+ */
+ if (new_crtc_state->update_pipe && old_crtc_state->inherited)
+ return true;
+
+ return !old_crtc_state->ips_enabled;
+}
+
+void hsw_ips_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!hsw_ips_need_enable(state, crtc))
+ return;
+
+ hsw_ips_enable(new_crtc_state);
+}
+
+/* IPS only exists on ULT machines and is tied to pipe A. */
+bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+}
+
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* IPS only exists on ULT machines and is tied to pipe A. */
+ if (!hsw_crtc_supports_ips(crtc))
+ return false;
+
+ if (!i915->params.enable_ips)
+ return false;
+
+ if (crtc_state->pipe_bpp > 24)
+ return false;
+
+ /*
+ * We compare against max which means we must take
+ * the increased cdclk requirement into account when
+ * calculating the new cdclk.
+ *
+ * Should measure whether using a lower cdclk w/o IPS
+ */
+ if (IS_BROADWELL(i915) &&
+ crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100)
+ return false;
+
+ return true;
+}
+
+int hsw_ips_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->ips_enabled = false;
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return 0;
+
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ if (crtc_state->crc_enabled)
+ return 0;
+
+ /* IPS should be fine as long as at least one plane is enabled. */
+ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
+ return 0;
+
+ if (IS_BROADWELL(i915)) {
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ return 0;
+ }
+
+ crtc_state->ips_enabled = true;
+
+ return 0;
+}
+
+void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!hsw_crtc_supports_ips(crtc))
+ return;
+
+ if (IS_HASWELL(i915)) {
+ crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE;
+ } else {
+ /*
+ * We cannot readout IPS state on broadwell, set to
+ * true so we can set it to a defined state on first
+ * commit.
+ */
+ crtc_state->ips_enabled = true;
+ }
+}
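
[Editor's note, not part of the patch: a minimal sketch of how the new IPS hooks are consumed from the atomic commit path. It mirrors the intel_display.c hunks later in this patch, where hsw_ips_pre_update()/hsw_ips_post_update() replace the old open-coded hsw_disable_ips()/hsw_enable_ips() calls; the example_* wrapper names are hypothetical.]

/* Sketch only: mirrors the intel_display.c wiring further down in this diff. */
static void example_pre_plane_update(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	/* Disables IPS when needed; returns true if a vblank wait is required. */
	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);
}

static void example_post_plane_update(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	/* Re-enables IPS only when hsw_ips_need_enable() says it is safe. */
	hsw_ips_post_update(state, crtc);
}
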
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
new file mode 100644
index 000000000000..4564dee497d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __HSW_IPS_H__
+#define __HSW_IPS_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+bool hsw_ips_disable(const struct intel_crtc_state *crtc_state);
+bool hsw_ips_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void hsw_ips_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool hsw_crtc_supports_ips(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+int hsw_ips_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
+
+#endif /* __HSW_IPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 093904065112..e0667d163266 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -281,17 +281,6 @@ void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
intel_crtc_put_color_blobs(crtc_state);
}
-void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state,
- const struct intel_crtc_state *from_crtc_state)
-{
- drm_property_replace_blob(&crtc_state->hw.degamma_lut,
- from_crtc_state->uapi.degamma_lut);
- drm_property_replace_blob(&crtc_state->hw.gamma_lut,
- from_crtc_state->uapi.gamma_lut);
- drm_property_replace_blob(&crtc_state->hw.ctm,
- from_crtc_state->uapi.ctm);
-}
-
/**
* intel_crtc_destroy_state - destroy crtc state
* @crtc: drm crtc
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index d2700c74c9da..1dc439983dd9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -44,8 +44,6 @@ struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
-void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state,
- const struct intel_crtc_state *from_crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_free(struct drm_atomic_state *state);
void intel_atomic_state_clear(struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index bec02333bdeb..c53aa6a4c7a0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -45,6 +45,7 @@
#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+#include "skl_scaler.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
@@ -322,6 +323,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->scaled_planes &= ~BIT(plane->id);
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
@@ -330,6 +332,185 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
plane_state->uapi.visible = false;
}
+/* FIXME nuke when all wm code is atomic */
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
+ struct intel_plane_state *new)
+{
+ /* Update watermarks on tiling or size changes. */
+ if (new->uapi.visible != cur->uapi.visible)
+ return true;
+
+ if (!cur->hw.fb || !new->hw.fb)
+ return false;
+
+ if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+ cur->hw.rotation != new->hw.rotation ||
+ drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+ drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+ drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+ drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
+ return true;
+
+ return false;
+}
+
+static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
+{
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ int dst_w = drm_rect_width(&plane_state->uapi.dst);
+ int dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ return src_w != dst_w || src_h != dst_h;
+}
+
+static bool intel_plane_do_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ if (!plane->async_flip)
+ return false;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return false;
+
+ /*
+ * In platforms after DISPLAY13, we might need to override
+ * first async flip in order to change watermark levels
+ * as part of optimization.
+ * So for those, we are checking if this is a first async flip.
+ * For platforms earlier than DISPLAY13 we always do async flip.
+ */
+ return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
+}
+
+static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *new_plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
+ bool was_crtc_enabled = old_crtc_state->hw.active;
+ bool is_crtc_enabled = new_crtc_state->hw.active;
+ bool turn_off, turn_on, visible, was_visible;
+ int ret;
+
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+ ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
+ if (ret)
+ return ret;
+ }
+
+ was_visible = old_plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
+
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
+ was_visible = false;
+
+ /*
+ * Visibility is calculated as if the crtc was on, but
+ * after scaler setup everything depends on it being off
+ * when the crtc isn't active.
+ *
+ * FIXME this is wrong for watermarks. Watermarks should also
+ * be computed as if the pipe would be active. Perhaps move
+ * per-plane wm computation to the .check_plane() hook, and
+ * only combine the results from all planes in the current place?
+ */
+ if (!is_crtc_enabled) {
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ visible = false;
+ }
+
+ if (!was_visible && !visible)
+ return 0;
+
+ turn_off = was_visible && (!visible || mode_changed);
+ turn_on = visible && (!was_visible || mode_changed);
+
+ drm_dbg_atomic(&dev_priv->drm,
+ "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ was_visible, visible,
+ turn_off, turn_on, mode_changed);
+
+ if (turn_on) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ new_crtc_state->update_wm_pre = true;
+
+ /* must disable cxsr around plane enable/disable */
+ if (plane->id != PLANE_CURSOR)
+ new_crtc_state->disable_cxsr = true;
+ } else if (turn_off) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ new_crtc_state->update_wm_post = true;
+
+ /* must disable cxsr around plane enable/disable */
+ if (plane->id != PLANE_CURSOR)
+ new_crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+ /* FIXME bollocks */
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
+ }
+ }
+
+ if (visible || was_visible)
+ new_crtc_state->fb_bits |= plane->frontbuffer_bit;
+
+ /*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
+ *
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
+ *
+ * Despite the w/a only being listed for IVB we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
+ *
+ * With experimental results seems this is needed also for primary
+ * plane, not only sprite plane.
+ */
+ if (plane->id != PLANE_CURSOR &&
+ (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)) &&
+ (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
+ intel_plane_is_scaled(new_plane_state))))
+ new_crtc_state->disable_lp_wm = true;
+
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ new_plane_state->do_async_flip = true;
+
+ return 0;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -357,6 +538,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->uapi.visible &&
+ intel_plane_is_scaled(new_plane_state))
+ new_crtc_state->scaled_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible &&
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
new_crtc_state->nv12_planes |= BIT(plane->id);
@@ -403,10 +588,11 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (new_crtc_state && new_crtc_state->bigjoiner_slave) {
+ if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ struct intel_crtc *master_crtc =
+ intel_master_crtc(new_crtc_state);
struct intel_plane *master_plane =
- intel_crtc_get_plane(new_crtc_state->bigjoiner_linked_crtc,
- plane->id);
+ intel_crtc_get_plane(master_crtc, plane->id);
new_master_plane_state =
intel_atomic_get_new_plane_state(state, master_plane);
@@ -507,8 +693,8 @@ void intel_plane_disable_arm(struct intel_plane *plane,
plane->disable_arm(plane, crtc_state);
}
-void intel_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -536,8 +722,8 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *state,
}
}
-void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -571,8 +757,8 @@ void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
}
}
-void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -597,6 +783,17 @@ void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
}
}
+void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (DISPLAY_VER(i915) >= 9)
+ skl_crtc_planes_update_arm(state, crtc);
+ else
+ i9xx_crtc_planes_update_arm(state, crtc);
+}
+
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
@@ -633,7 +830,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
}
/* right side of the image is on the slave crtc, adjust dst to match */
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
drm_rect_translate(dst, -crtc_state->pipe_src_w, 0);
/*
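
[Editor's note, not part of the patch: plane source rectangles in uapi.src are stored in 16.16 fixed point, which is why intel_plane_is_scaled() above shifts by 16 before comparing against the integer destination size. The numbers below are a hypothetical 1920x1080 -> 1280x720 downscale.]

/*
 * Illustration with assumed values:
 *
 *   src_w = drm_rect_width(&src)  >> 16 = (1920 << 16) >> 16 = 1920
 *   src_h = drm_rect_height(&src) >> 16 = (1080 << 16) >> 16 = 1080
 *   dst_w = 1280, dst_h = 720
 *
 *   intel_plane_is_scaled() -> (1920 != 1280) || (1080 != 720) -> true,
 *   so the plane ends up in crtc_state->scaled_planes.
 */
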
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index ead789709477..f4763a53541e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -44,22 +44,16 @@ void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void intel_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
int intel_plane_atomic_check(struct intel_atomic_state *state,
struct intel_plane *plane);
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *plane_state);
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index aec0efd5350e..40b5e7ed12c2 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -596,6 +596,12 @@ parse_general_features(struct drm_i915_private *i915,
} else {
i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
+
+ if (bdb->version >= 249 && general->afc_startup_config) {
+ i915->vbt.override_afc_startup = true;
+ i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ }
+
drm_dbg_kms(&i915->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
i915->vbt.int_tv_support,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 5dce3cf0ed12..7bbe0dc5926b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -10,6 +10,7 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_pm.h"
@@ -673,6 +674,49 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
+ enum plane_id plane_id;
+
+ memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+ unsigned int data_rate = crtc_state->data_rate[plane_id];
+ unsigned int dbuf_mask = 0;
+ enum dbuf_slice slice;
+
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_y);
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_uv);
+
+ /*
+ * FIXME: To calculate that more properly we probably
+ * need to split per plane data_rate into data_rate_y
+ * and data_rate_uv for multiplanar formats in order not
+ * to get accounted those twice if they happen to reside
+ * on different slices.
+ * However for pre-icl this would work anyway because
+ * we have only single slice and for icl+ uv plane has
+ * non-zero data rate.
+ * So in worst case those calculation are a bit
+ * pessimistic, which shouldn't pose any significant
+ * problem anyway.
+ */
+ for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask)
+ crtc_bw->used_bw[slice] += data_rate;
+ }
+}
+
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -685,50 +729,13 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- enum plane_id plane_id;
- struct intel_dbuf_bw *crtc_bw;
-
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];
-
- memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
-
- if (!crtc_state->hw.active)
- continue;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *plane_alloc =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_ddb_entry *uv_plane_alloc =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
- unsigned int data_rate = crtc_state->data_rate[plane_id];
- unsigned int dbuf_mask = 0;
- enum dbuf_slice slice;
-
- dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
- dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
-
- /*
- * FIXME: To calculate that more properly we probably
- * need to to split per plane data_rate into data_rate_y
- * and data_rate_uv for multiplanar formats in order not
- * to get accounted those twice if they happen to reside
- * on different slices.
- * However for pre-icl this would work anyway because
- * we have only single slice and for icl+ uv plane has
- * non-zero data rate.
- * So in worst case those calculation are a bit
- * pessimistic, which shouldn't pose any significant
- * problem anyway.
- */
- for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
- crtc_bw->used_bw[slice] += data_rate;
- }
+ skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
}
if (!old_bw_state)
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 46c6eecbd917..0ceaed1c9656 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -30,19 +30,19 @@ struct intel_bw_state {
*/
u8 pipe_sagv_reject;
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
/*
* Current QGV points mask, which restricts
* some particular SAGV states, not to confuse
* with pipe_sagv_mask.
*/
- u8 qgv_points_mask;
+ u16 qgv_points_mask;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
- /* bitmask of active pipes */
- u8 active_pipes;
-
int min_cdclk;
};
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 4b140a014ca8..8888fda8b701 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -23,6 +23,7 @@
#include <linux/time.h>
+#include "hsw_ips.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -31,6 +32,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_psr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index de3ded1e327a..e94ec57260f1 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -28,6 +28,25 @@
#include "intel_dpll.h"
#include "vlv_dsi_pll.h"
+struct intel_color_funcs {
+ int (*color_check)(struct intel_crtc_state *crtc_state);
+ /*
+ * Program double buffered color management registers during
+ * vblank evasion. The registers should then latch during the
+ * next vblank start, alongside any other double buffered registers
+ * involved with the same commit.
+ */
+ void (*color_commit)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Load LUTs (and other single buffered color management
+ * registers). Will (hopefully) be called during the vblank
+ * following the latching of any double buffered registers
+ * involved with the same commit.
+ */
+ void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ void (*read_luts)(struct intel_crtc_state *crtc_state);
+};
+
#define CTM_COEFF_SIGN (1ULL << 63)
#define CTM_COEFF_1_0 (1ULL << 32)
@@ -160,29 +179,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
- coeff[0] << 16 | coeff[1]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
- coeff[3] << 16 | coeff[4]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
- coeff[6] << 16 | coeff[7]);
- intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
if (DISPLAY_VER(dev_priv) >= 7) {
- intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
- postoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
- postoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
- postoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+ postoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+ postoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+ postoff[2]);
}
}
@@ -194,28 +213,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
- coeff[0] << 16 | coeff[1]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
- coeff[2] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ coeff[2] << 16);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
- coeff[3] << 16 | coeff[4]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
- coeff[5] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ coeff[5] << 16);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
- coeff[6] << 16 | coeff[7]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
- coeff[8] << 16);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ coeff[8] << 16);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
- intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
@@ -319,8 +338,8 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_off_zero);
}
- intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
@@ -346,8 +365,8 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_postoff_limited_range);
}
- intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void chv_load_cgm_csc(struct intel_crtc *crtc,
@@ -377,16 +396,16 @@ static void chv_load_cgm_csc(struct intel_crtc *crtc,
coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
- coeffs[8]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
}
/* convert hw value with given bit_precision to lut property val */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 354b08d6f81d..9dee12986991 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2703,6 +2703,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+ struct intel_crtc *slave_crtc;
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -2721,9 +2722,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
}
- if (old_crtc_state->bigjoiner_linked_crtc) {
- struct intel_crtc *slave_crtc =
- old_crtc_state->bigjoiner_linked_crtc;
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
const struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave_crtc);
@@ -2926,7 +2926,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
{
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
- if (!crtc_state->bigjoiner_slave)
+ if (!intel_crtc_is_bigjoiner_slave(crtc_state))
intel_ddi_enable_transcoder_func(encoder, crtc_state);
intel_vrr_enable(encoder, crtc_state);
@@ -3041,6 +3041,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
int required_lanes = crtc_state ? crtc_state->lane_count : 1;
@@ -3050,11 +3051,12 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
if (crtc_state && crtc_state->hw.active) {
- struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc;
+ struct intel_crtc *slave_crtc;
intel_update_active_dpll(state, crtc, encoder);
- if (slave_crtc)
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state))
intel_update_active_dpll(state, slave_crtc, encoder);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7f512f9e9e5c..aaf2aee4da35 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -74,6 +74,7 @@
#include "g4x_dp.h"
#include "g4x_hdmi.h"
+#include "hsw_ips.h"
#include "i915_drv.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
@@ -337,10 +338,38 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
-static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->bigjoiner_slave)
- return crtc_state->bigjoiner_linked_crtc;
+ return ffs(crtc_state->bigjoiner_pipes) - 1;
+}
+
+u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
+}
+
+bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return crtc_state->bigjoiner_pipes &&
+ crtc->pipe != bigjoiner_master_pipe(crtc_state);
+}
+
+bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return crtc_state->bigjoiner_pipes &&
+ crtc->pipe == bigjoiner_master_pipe(crtc_state);
+}
+
+struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
else
return to_intel_crtc(crtc_state->uapi.crtc);
}
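
[Editor's note, not part of the patch: a quick illustration of the bigjoiner_pipes bitmask that these helpers introduce in place of the old bigjoiner_slave / bigjoiner_linked_crtc fields, assuming a hypothetical two-pipe (A + B) bigjoiner configuration shared by both crtc states.]

/*
 * Assumed example:
 *
 *   crtc_state->bigjoiner_pipes                    = BIT(PIPE_A) | BIT(PIPE_B);
 *   bigjoiner_master_pipe(crtc_state)              = ffs(bigjoiner_pipes) - 1 = PIPE_A;
 *   intel_crtc_bigjoiner_slave_pipes(crtc_state)   = bigjoiner_pipes & ~BIT(PIPE_A)
 *                                                  = BIT(PIPE_B);
 *
 * so the crtc on pipe B reports intel_crtc_is_bigjoiner_slave() == true and
 * intel_master_crtc() resolves to the pipe A crtc.
 */
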
@@ -752,8 +781,11 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
- if (plane->id == PLANE_PRIMARY)
- hsw_disable_ips(crtc_state);
+ if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
+ hsw_ips_disable(crtc_state)) {
+ crtc_state->ips_enabled = false;
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
/*
* Vblank time updates from the shadow to live plane control register
@@ -1090,72 +1122,6 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!crtc_state->ips_enabled)
- return;
-
- /*
- * We can only enable IPS after we enable a plane and wait for a vblank
- * This function is called from post_plane_update, which is run after
- * a vblank wait.
- */
- drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
-
- if (IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
- /* Quoting Art Runyan: "its not safe to expect any particular
- * value in IPS_CTL bit 31 after enabling IPS through the
- * mailbox." Moreover, the mailbox may return a bogus state,
- * so we need to just enable it and continue on.
- */
- } else {
- intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
- /* The bit only becomes 1 in the next vblank, so this wait here
- * is essentially intel_wait_for_vblank. If we don't have this
- * and don't wait for vblanks until the end of crtc_enable, then
- * the HW state readout code will complain that the expected
- * IPS_CTL value is not the one we read. */
- if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
- drm_err(&dev_priv->drm,
- "Timed out waiting for IPS enable\n");
- }
-}
-
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!crtc_state->ips_enabled)
- return;
-
- if (IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(dev,
- snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
- /*
- * Wait for PCODE to finish disabling IPS. The BSpec specified
- * 42ms timeout value leads to occasional timeouts so use 100ms
- * instead.
- */
- if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
- drm_err(&dev_priv->drm,
- "Timed out waiting for IPS disable\n");
- } else {
- intel_de_write(dev_priv, IPS_CTL, 0);
- intel_de_posting_read(dev_priv, IPS_CTL);
- }
-
- /* We need to wait for a vblank before we can disable the plane. */
- intel_crtc_wait_for_next_vblank(crtc);
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (crtc->overlay)
@@ -1166,67 +1132,6 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
*/
}
-static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!old_crtc_state->ips_enabled)
- return false;
-
- if (intel_crtc_needs_modeset(new_crtc_state))
- return true;
-
- /*
- * Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- *
- * Disable IPS before we program the LUT.
- */
- if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe) &&
- new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
- return true;
-
- return !new_crtc_state->ips_enabled;
-}
-
-static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!new_crtc_state->ips_enabled)
- return false;
-
- if (intel_crtc_needs_modeset(new_crtc_state))
- return true;
-
- /*
- * Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- *
- * Re-enable IPS after the LUT has been programmed.
- */
- if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe) &&
- new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
- return true;
-
- /*
- * We can't read out IPS on broadwell, assume the worst and
- * forcibly enable IPS on the first fastset.
- */
- if (new_crtc_state->update_pipe && old_crtc_state->inherited)
- return true;
-
- return !old_crtc_state->ips_enabled;
-}
-
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -1321,9 +1226,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
intel_update_watermarks(dev_priv);
- if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
- hsw_enable_ips(new_crtc_state);
-
+ hsw_ips_post_update(state, crtc);
intel_fbc_post_update(state, crtc);
intel_drrs_page_flip(state, crtc);
@@ -1426,8 +1329,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_psr_pre_plane_update(state, crtc);
- if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
- hsw_disable_ips(old_crtc_state);
+ if (hsw_ips_pre_update(state, crtc))
+ intel_crtc_wait_for_next_vblank(crtc);
if (intel_fbc_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1905,12 +1808,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-/* IPS only exists on ULT machines and is tied to pipe A. */
-static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
-{
- return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
-}
-
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
enum pipe pipe, bool apply)
{
@@ -1974,34 +1871,18 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_state *master_crtc_state;
- struct intel_crtc *master_crtc;
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- struct intel_encoder *encoder = NULL;
- int i;
-
- master_crtc = intel_master_crtc(crtc_state);
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
-
- for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc != &master_crtc->base)
- continue;
-
- encoder = to_intel_encoder(conn_state->best_encoder);
- break;
- }
+ struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
/*
* Enable sequence steps 1-7 on bigjoiner master
*/
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
intel_encoders_pre_pll_enable(state, master_crtc);
if (crtc_state->shared_dpll)
intel_enable_shared_dpll(crtc_state);
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
intel_encoders_pre_enable(state, master_crtc);
}
@@ -2065,7 +1946,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
- if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder))
+ if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
+ !transcoder_is_dsi(cpu_transcoder))
hsw_configure_cpu_transcoder(new_crtc_state);
crtc->active = true;
@@ -2105,7 +1987,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
}
- if (new_crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
@@ -2190,7 +2072,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- if (!old_crtc_state->bigjoiner_slave) {
+ if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
}
@@ -2778,77 +2660,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
}
}
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* IPS only exists on ULT machines and is tied to pipe A. */
- if (!hsw_crtc_supports_ips(crtc))
- return false;
-
- if (!dev_priv->params.enable_ips)
- return false;
-
- if (crtc_state->pipe_bpp > 24)
- return false;
-
- /*
- * We compare against max which means we must take
- * the increased cdclk requirement into account when
- * calculating the new cdclk.
- *
- * Should measure whether using a lower cdclk w/o IPS
- */
- if (IS_BROADWELL(dev_priv) &&
- crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
- return false;
-
- return true;
-}
-
-static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
-
- crtc_state->ips_enabled = false;
-
- if (!hsw_crtc_state_ips_capable(crtc_state))
- return 0;
-
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- if (crtc_state->crc_enabled)
- return 0;
-
- /* IPS should be fine as long as at least one plane is enabled. */
- if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
- return 0;
-
- if (IS_BROADWELL(dev_priv)) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
- return 0;
- }
-
- crtc_state->ips_enabled = true;
-
- return 0;
-}
-
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -3347,13 +3158,11 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pipeconf;
-
- pipeconf = 0;
+ u32 pipeconf = 0;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= PIPECONF_ENABLE;
if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -4069,19 +3878,20 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
return tmp & TRANS_DDI_FUNC_ENABLE;
}
-static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
+static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
+ u8 *master_pipes, u8 *slave_pipes)
{
- u8 master_pipes = 0, slave_pipes = 0;
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ *master_pipes = 0;
+ *slave_pipes = 0;
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
+ bigjoiner_pipes(dev_priv)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
- continue;
-
power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
@@ -4090,9 +3900,9 @@ static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
continue;
if (tmp & MASTER_BIG_JOINER_ENABLE)
- master_pipes |= BIT(pipe);
+ *master_pipes |= BIT(pipe);
else
- slave_pipes |= BIT(pipe);
+ *slave_pipes |= BIT(pipe);
}
if (DISPLAY_VER(dev_priv) < 13)
@@ -4103,18 +3913,47 @@ static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
if (tmp & UNCOMPRESSED_JOINER_MASTER)
- master_pipes |= BIT(pipe);
+ *master_pipes |= BIT(pipe);
if (tmp & UNCOMPRESSED_JOINER_SLAVE)
- slave_pipes |= BIT(pipe);
+ *slave_pipes |= BIT(pipe);
}
}
/* Bigjoiner pipes should always be consecutive master and slave */
- drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
+ drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
"Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
- master_pipes, slave_pipes);
+ *master_pipes, *slave_pipes);
+}
+
+static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
+{
+ if ((slave_pipes & BIT(pipe)) == 0)
+ return pipe;
+
+ /* ignore everything above our pipe */
+ master_pipes &= ~GENMASK(7, pipe);
+
+ /* highest remaining bit should be our master pipe */
+ return fls(master_pipes) - 1;
+}
+
+static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
+{
+ enum pipe master_pipe, next_master_pipe;
+
+ master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);
+
+ if ((master_pipes & BIT(master_pipe)) == 0)
+ return 0;
- return slave_pipes;
+ /* ignore our master pipe and everything below it */
+ master_pipes &= ~GENMASK(master_pipe, 0);
+ /* make sure a high bit is set for the ffs() */
+ master_pipes |= BIT(7);
+ /* lowest remaining bit should be the next master pipe */
+ next_master_pipe = ffs(master_pipes) - 1;
+
+ return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
@@ -4133,6 +3972,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
enum transcoder cpu_transcoder;
+ u8 master_pipes, slave_pipes;
u8 enabled_transcoders = 0;
/*
@@ -4184,8 +4024,10 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
enabled_transcoders |= BIT(cpu_transcoder);
/* bigjoiner slave -> consider the master pipe's transcoder as well */
- if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
- cpu_transcoder = (enum transcoder) crtc->pipe - 1;
+ enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
+ if (slave_pipes & BIT(crtc->pipe)) {
+ cpu_transcoder = (enum transcoder)
+ get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
}
@@ -4310,6 +4152,24 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
+static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u8 master_pipes, slave_pipes;
+ enum pipe pipe = crtc->pipe;
+
+ enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
+
+ if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
+ return;
+
+ crtc_state->bigjoiner = true;
+ crtc_state->bigjoiner_pipes =
+ BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
+ get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
+}
+
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -4336,8 +4196,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
goto out;
intel_dsc_get_config(pipe_config);
- if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
- intel_uncompressed_joiner_get_config(pipe_config);
+ intel_bigjoiner_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@@ -4395,19 +4254,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
ilk_get_pfit_config(pipe_config);
}
- if (hsw_crtc_supports_ips(crtc)) {
- if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = intel_de_read(dev_priv,
- IPS_CTL) & IPS_ENABLE;
- else {
- /*
- * We cannot readout IPS state on broadwell, set to
- * true so we can set it to a defined state on first
- * commit.
- */
- pipe_config->ips_enabled = true;
- }
- }
+ hsw_ips_get_config(pipe_config);
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
@@ -4819,194 +4666,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
return mode;
}
-/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @cur: current plane state
- * @new: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-static bool intel_wm_need_update(const struct intel_plane_state *cur,
- struct intel_plane_state *new)
-{
- /* Update watermarks on tiling or size changes. */
- if (new->uapi.visible != cur->uapi.visible)
- return true;
-
- if (!cur->hw.fb || !new->hw.fb)
- return false;
-
- if (cur->hw.fb->modifier != new->hw.fb->modifier ||
- cur->hw.rotation != new->hw.rotation ||
- drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
- drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
- drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
- drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
- return true;
-
- return false;
-}
-
-static bool needs_scaling(const struct intel_plane_state *state)
-{
- int src_w = drm_rect_width(&state->uapi.src) >> 16;
- int src_h = drm_rect_height(&state->uapi.src) >> 16;
- int dst_w = drm_rect_width(&state->uapi.dst);
- int dst_h = drm_rect_height(&state->uapi.dst);
-
- return (src_w != dst_w || src_h != dst_h);
-}
-
-static bool intel_plane_do_async_flip(struct intel_plane *plane,
- const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- if (!plane->async_flip)
- return false;
-
- if (!new_crtc_state->uapi.async_flip)
- return false;
-
- /*
- * In platforms after DISPLAY13, we might need to override
- * first async flip in order to change watermark levels
- * as part of optimization.
- * So for those, we are checking if this is a first async flip.
- * For platforms earlier than DISPLAY13 we always do async flip.
- */
- return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
-}
-
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *new_plane_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
- bool was_crtc_enabled = old_crtc_state->hw.active;
- bool is_crtc_enabled = new_crtc_state->hw.active;
- bool turn_off, turn_on, visible, was_visible;
- int ret;
-
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
- if (ret)
- return ret;
- }
-
- was_visible = old_plane_state->uapi.visible;
- visible = new_plane_state->uapi.visible;
-
- if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
- was_visible = false;
-
- /*
- * Visibility is calculated as if the crtc was on, but
- * after scaler setup everything depends on it being off
- * when the crtc isn't active.
- *
- * FIXME this is wrong for watermarks. Watermarks should also
- * be computed as if the pipe would be active. Perhaps move
- * per-plane wm computation to the .check_plane() hook, and
- * only combine the results from all planes in the current place?
- */
- if (!is_crtc_enabled) {
- intel_plane_set_invisible(new_crtc_state, new_plane_state);
- visible = false;
- }
-
- if (!was_visible && !visible)
- return 0;
-
- turn_off = was_visible && (!visible || mode_changed);
- turn_on = visible && (!was_visible || mode_changed);
-
- drm_dbg_atomic(&dev_priv->drm,
- "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- crtc->base.base.id, crtc->base.name,
- plane->base.base.id, plane->base.name,
- was_visible, visible,
- turn_off, turn_on, mode_changed);
-
- if (turn_on) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- new_crtc_state->update_wm_pre = true;
-
- /* must disable cxsr around plane enable/disable */
- if (plane->id != PLANE_CURSOR)
- new_crtc_state->disable_cxsr = true;
- } else if (turn_off) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- new_crtc_state->update_wm_post = true;
-
- /* must disable cxsr around plane enable/disable */
- if (plane->id != PLANE_CURSOR)
- new_crtc_state->disable_cxsr = true;
- } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- /* FIXME bollocks */
- new_crtc_state->update_wm_pre = true;
- new_crtc_state->update_wm_post = true;
- }
- }
-
- if (visible || was_visible)
- new_crtc_state->fb_bits |= plane->frontbuffer_bit;
-
- /*
- * ILK/SNB DVSACNTR/Sprite Enable
- * IVB SPR_CTL/Sprite Enable
- * "When in Self Refresh Big FIFO mode, a write to enable the
- * plane will be internally buffered and delayed while Big FIFO
- * mode is exiting."
- *
- * Which means that enabling the sprite can take an extra frame
- * when we start in big FIFO mode (LP1+). Thus we need to drop
- * down to LP0 and wait for vblank in order to make sure the
- * sprite gets enabled on the next vblank after the register write.
- * Doing otherwise would risk enabling the sprite one frame after
- * we've already signalled flip completion. We can resume LP1+
- * once the sprite has been enabled.
- *
- *
- * WaCxSRDisabledForSpriteScaling:ivb
- * IVB SPR_SCALE/Scaling Enable
- * "Low Power watermarks must be disabled for at least one
- * frame before enabling sprite scaling, and kept disabled
- * until sprite scaling is disabled."
- *
- * ILK/SNB DVSASCALE/Scaling Enable
- * "When in Self Refresh Big FIFO mode, scaling enable will be
- * masked off while Big FIFO mode is exiting."
- *
- * Despite the w/a only being listed for IVB we assume that
- * the ILK/SNB note has similar ramifications, hence we apply
- * the w/a on all three platforms.
- *
- * With experimental results seems this is needed also for primary
- * plane, not only sprite plane.
- */
- if (plane->id != PLANE_CURSOR &&
- (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)) &&
- (turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(new_plane_state))))
- new_crtc_state->disable_lp_wm = true;
-
- if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
- new_plane_state->do_async_flip = true;
-
- return 0;
-}
-
static bool encoders_cloneable(const struct intel_encoder *a,
const struct intel_encoder *b)
{
@@ -5266,7 +4925,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
+ ret = intel_dpll_crtc_compute_clock(crtc_state);
if (ret)
return ret;
}
@@ -5317,7 +4976,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
if (HAS_IPS(dev_priv)) {
- ret = hsw_compute_ips_config(crtc_state);
+ ret = hsw_ips_compute_config(state, crtc);
if (ret)
return ret;
}
@@ -5619,9 +5278,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
transcoder_name(pipe_config->master_transcoder),
pipe_config->sync_mode_slaves_mask);
- drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
- pipe_config->bigjoiner_slave ? "slave" :
- pipe_config->bigjoiner ? "master" : "no");
+ drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
enableddisabled(pipe_config->splitter.enable),
@@ -5818,35 +5478,42 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- const struct intel_crtc_state *master_crtc_state;
- struct intel_crtc *master_crtc;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- master_crtc = intel_master_crtc(crtc_state);
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
- /* No need to copy state if the master state is unchanged */
- if (master_crtc_state)
- intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->uapi.degamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->uapi.gamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.ctm,
+ crtc_state->uapi.ctm);
}
static void
-intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
crtc_state->hw.enable = crtc_state->uapi.enable;
crtc_state->hw.active = crtc_state->uapi.active;
crtc_state->hw.mode = crtc_state->uapi.mode;
crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
- intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
return;
crtc_state->uapi.enable = crtc_state->hw.enable;
@@ -5857,7 +5524,6 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- /* copy color blobs to uapi */
drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
crtc_state->hw.degamma_lut);
drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
@@ -5866,51 +5532,79 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->hw.ctm);
}
+static void
+copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
+ struct intel_crtc *slave_crtc)
+{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
+ const struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+
+ drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
+ master_crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
+ master_crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&slave_crtc_state->hw.ctm,
+ master_crtc_state->hw.ctm);
+
+ slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
+}
+
static int
-copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
- const struct intel_crtc_state *from_crtc_state)
+copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *slave_crtc)
{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
+ const struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
struct intel_crtc_state *saved_state;
- saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
+ saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
if (!saved_state)
return -ENOMEM;
- saved_state->uapi = crtc_state->uapi;
- saved_state->scaler_state = crtc_state->scaler_state;
- saved_state->shared_dpll = crtc_state->shared_dpll;
- saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
- saved_state->crc_enabled = crtc_state->crc_enabled;
+ /* preserve some things from the slave's original crtc state */
+ saved_state->uapi = slave_crtc_state->uapi;
+ saved_state->scaler_state = slave_crtc_state->scaler_state;
+ saved_state->shared_dpll = slave_crtc_state->shared_dpll;
+ saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
+ saved_state->crc_enabled = slave_crtc_state->crc_enabled;
- intel_crtc_free_hw_state(crtc_state);
- memcpy(crtc_state, saved_state, sizeof(*crtc_state));
+ intel_crtc_free_hw_state(slave_crtc_state);
+ memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
kfree(saved_state);
/* Re-init hw state */
- memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
- crtc_state->hw.enable = from_crtc_state->hw.enable;
- crtc_state->hw.active = from_crtc_state->hw.active;
- crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
- crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
+ memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
+ slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
+ slave_crtc_state->hw.active = master_crtc_state->hw.active;
+ slave_crtc_state->hw.mode = master_crtc_state->hw.mode;
+ slave_crtc_state->hw.pipe_mode = master_crtc_state->hw.pipe_mode;
+ slave_crtc_state->hw.adjusted_mode = master_crtc_state->hw.adjusted_mode;
+ slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
+
+ copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
/* Some fixups */
- crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
- crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
- crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
- crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
- crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
- crtc_state->bigjoiner_slave = true;
- crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
- crtc_state->has_audio = from_crtc_state->has_audio;
+ slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
+ slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
+ slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
+ slave_crtc_state->cpu_transcoder = master_crtc_state->cpu_transcoder;
+ slave_crtc_state->has_audio = master_crtc_state->has_audio;
return 0;
}
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
@@ -5940,7 +5634,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
- intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
return 0;
}
@@ -6618,6 +6312,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.div0);
PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
@@ -6669,8 +6364,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
PIPE_CONF_CHECK_BOOL(bigjoiner);
- PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
- PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
+ PIPE_CONF_CHECK_X(bigjoiner_pipes);
PIPE_CONF_CHECK_I(dsc.compression_enable);
PIPE_CONF_CHECK_I(dsc.dsc_split);
@@ -7456,20 +7150,25 @@ static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- int ret;
+ struct intel_crtc *other;
- if (!crtc_state->bigjoiner)
- continue;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
+ crtc_state->bigjoiner_pipes) {
+ int ret;
- ret = intel_crtc_add_bigjoiner_planes(state, crtc,
- crtc_state->bigjoiner_linked_crtc);
- if (ret)
- return ret;
+ if (crtc == other)
+ continue;
+
+ ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -7571,71 +7270,123 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
return false;
}
+static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
+ u8 pipes)
+{
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ pipes & BIT(crtc->pipe) &&
+ intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+ struct intel_crtc *master_crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
- struct intel_crtc *slave_crtc, *master_crtc;
+ struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc *slave_crtc;
+ u8 slave_pipes;
- /* slave being enabled, is master is still claiming this crtc? */
- if (old_crtc_state->bigjoiner_slave) {
- slave_crtc = crtc;
- master_crtc = old_crtc_state->bigjoiner_linked_crtc;
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
- if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
- goto claimed;
- }
+ /*
+ * TODO: encoder.compute_config() may be the best
+ * place to populate the bitmask for the master crtc.
+ * For now encoder.compute_config() just flags things
+ * as needing bigjoiner and we populate the bitmask
+ * here.
+ */
+ WARN_ON(master_crtc_state->bigjoiner_pipes);
- if (!new_crtc_state->bigjoiner)
+ if (!master_crtc_state->bigjoiner)
return 0;
- slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
- if (!slave_crtc) {
+ slave_pipes = BIT(master_crtc->pipe + 1);
+
+ if (slave_pipes & ~bigjoiner_pipes(i915)) {
drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Big joiner configuration requires "
- "CRTC + 1 to be used, doesn't exist\n",
- crtc->base.base.id, crtc->base.name);
+ "[CRTC:%d:%s] Cannot act as big joiner master "
+ "(need 0x%x as slave pipes, only 0x%x possible)\n",
+ master_crtc->base.base.id, master_crtc->base.name,
+ slave_pipes, bigjoiner_pipes(i915));
return -EINVAL;
}
- new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
- slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
- master_crtc = crtc;
- if (IS_ERR(slave_crtc_state))
- return PTR_ERR(slave_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, slave_pipes) {
+ struct intel_crtc_state *slave_crtc_state;
+ int ret;
- /* master being enabled, slave was already configured? */
- if (slave_crtc_state->uapi.enable)
- goto claimed;
+ slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
+ if (IS_ERR(slave_crtc_state))
+ return PTR_ERR(slave_crtc_state);
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Used as slave for big joiner\n",
- slave_crtc->base.base.id, slave_crtc->base.name);
+ /* master being enabled, slave was already configured? */
+ if (slave_crtc_state->uapi.enable) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
+ "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
+ return -EINVAL;
+ }
- return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
+ /*
+ * The state copy logic assumes the master crtc gets processed
+ * before the slave crtc during the main compute_config loop.
+ * This works because the crtcs are created in pipe order,
+ * and the hardware requires master pipe < slave pipe as well.
+ * Should that change we need to rethink the logic.
+ */
+ if (WARN_ON(drm_crtc_index(&master_crtc->base) >
+ drm_crtc_index(&slave_crtc->base)))
+ return -EINVAL;
-claimed:
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
- "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
- slave_crtc->base.base.id, slave_crtc->base.name,
- master_crtc->base.base.id, master_crtc->base.name);
- return -EINVAL;
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
+
+ master_crtc_state->bigjoiner_pipes =
+ BIT(master_crtc->pipe) | BIT(slave_crtc->pipe);
+ slave_crtc_state->bigjoiner_pipes =
+ BIT(master_crtc->pipe) | BIT(slave_crtc->pipe);
+
+ ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
- struct intel_crtc_state *master_crtc_state)
+ struct intel_crtc *master_crtc)
{
- struct intel_crtc_state *slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc *slave_crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+ slave_crtc_state->bigjoiner = false;
+ slave_crtc_state->bigjoiner_pipes = 0;
+
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
+ }
- slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
- slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
- slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
- intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
+ master_crtc_state->bigjoiner = false;
+ master_crtc_state->bigjoiner_pipes = 0;
}
/**
@@ -7785,34 +7536,37 @@ static int intel_atomic_check_async(struct intel_atomic_state *state, struct int
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
+ u8 affected_pipes = 0;
+ u8 modeset_pipes = 0;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_crtc_state *linked_crtc_state;
- struct intel_crtc *linked_crtc;
- int ret;
+ affected_pipes |= crtc_state->bigjoiner_pipes;
+ if (intel_crtc_needs_modeset(crtc_state))
+ modeset_pipes |= crtc_state->bigjoiner_pipes;
+ }
- if (!crtc_state->bigjoiner)
- continue;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- linked_crtc = crtc_state->bigjoiner_linked_crtc;
- linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
- if (IS_ERR(linked_crtc_state))
- return PTR_ERR(linked_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
+ int ret;
- if (!intel_crtc_needs_modeset(crtc_state))
- continue;
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- linked_crtc_state->uapi.mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
- ret = drm_atomic_add_affected_connectors(&state->base,
- &linked_crtc->base);
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
if (ret)
return ret;
- ret = intel_atomic_add_affected_planes(state, linked_crtc);
+ ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
@@ -7820,8 +7574,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
/* Kill old bigjoiner link, we may re-establish afterwards */
if (intel_crtc_needs_modeset(crtc_state) &&
- crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
- kill_bigjoiner_slave(state, crtc_state);
+ intel_crtc_is_bigjoiner_master(crtc_state))
+ kill_bigjoiner_slave(state, crtc);
}
return 0;
@@ -7846,6 +7600,10 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state, i) {
if (new_crtc_state->inherited != old_crtc_state->inherited)
new_crtc_state->uapi.mode_changed = true;
+
+ if (new_crtc_state->uapi.scaling_filter !=
+ old_crtc_state->uapi.scaling_filter)
+ new_crtc_state->uapi.mode_changed = true;
}
intel_vrr_check_modeset(state);
@@ -7861,21 +7619,22 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state)) {
- /* Light copy */
- intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
-
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ copy_bigjoiner_crtc_state_nomodeset(state, crtc);
+ else
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
continue;
}
if (!new_crtc_state->uapi.enable) {
- if (!new_crtc_state->bigjoiner_slave) {
- intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
+ if (!intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
any_ms = true;
}
continue;
}
- ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
+ ret = intel_crtc_prepare_cleared_state(state, crtc);
if (ret)
goto fail;
@@ -7883,8 +7642,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
- new_crtc_state);
+ ret = intel_atomic_check_bigjoiner(state, crtc);
if (ret)
goto fail;
}
@@ -7938,10 +7696,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (new_crtc_state->bigjoiner) {
- struct intel_crtc_state *linked_crtc_state =
- intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
-
- if (intel_crtc_needs_modeset(linked_crtc_state)) {
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
@@ -8121,9 +7876,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_set_pipe_chicken(new_crtc_state);
}
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@ -8188,7 +7940,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
dev_priv->display->crtc_enable(state, crtc);
- if (new_crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
return;
/* vblanks work again, re-enable pipe CRC. */
@@ -8198,7 +7950,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -8215,21 +7967,22 @@ static void intel_update_crtc(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
intel_encoders_update_pipe(state, crtc);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ new_crtc_state->update_pipe)
+ icl_set_pipe_chicken(new_crtc_state);
}
intel_fbc_update(state, crtc);
- intel_update_planes_on_crtc(state, crtc);
+ intel_crtc_planes_update_noarm(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
commit_pipe_pre_planes(state, crtc);
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_arm_planes_on_crtc(state, crtc);
- else
- i9xx_arm_planes_on_crtc(state, crtc);
+ intel_crtc_planes_update_arm(state, crtc);
commit_pipe_post_planes(state, crtc);
@@ -8305,7 +8058,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
!intel_dp_mst_is_slave_trans(old_crtc_state) &&
- !old_crtc_state->bigjoiner_slave)
+ !intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
intel_old_crtc_state_disables(state, old_crtc_state,
@@ -8420,7 +8173,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
is_trans_port_sync_master(new_crtc_state) ||
- (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
+ intel_crtc_is_bigjoiner_master(new_crtc_state))
continue;
modeset_pipes &= ~BIT(pipe);
@@ -8947,10 +8700,8 @@ static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
struct intel_crtc *crtc;
u32 possible_crtcs = 0;
- for_each_intel_crtc(dev, crtc) {
- if (encoder->pipe_mask & BIT(crtc->pipe))
- possible_crtcs |= drm_crtc_mask(&crtc->base);
- }
+ for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
return possible_crtcs;
}
@@ -10132,7 +9883,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
- !crtc_state->bigjoiner_slave)
+ !intel_crtc_is_bigjoiner_slave(crtc_state))
intel_crtc_disable_noatomic(crtc, ctx);
if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
@@ -10345,12 +10096,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
/* read out to slave crtc as well for bigjoiner */
if (crtc_state->bigjoiner) {
+ struct intel_crtc *slave_crtc;
+
/* encoder should read be linked to bigjoiner master */
- WARN_ON(crtc_state->bigjoiner_slave);
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
- crtc = crtc_state->bigjoiner_linked_crtc;
- crtc_state = to_intel_crtc_state(crtc->base.state);
- intel_encoder_get_config(encoder, crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state;
+
+ slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
+ intel_encoder_get_config(encoder, slave_crtc_state);
+ }
}
} else {
encoder->base.crtc = NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 457738aeee3e..11d6134c53c8 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -430,11 +430,11 @@ enum hpd_pin {
&(dev)->mode_config.crtc_list, \
base.head)
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+#define for_each_intel_crtc_in_pipe_mask(dev, intel_crtc, pipe_mask) \
list_for_each_entry(intel_crtc, \
&(dev)->mode_config.crtc_list, \
base.head) \
- for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
+ for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
@@ -555,6 +555,10 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
bool bigjoiner);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
+u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
+struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
@@ -632,9 +636,6 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index f4de004d470f..695aa6efe8c1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -16,6 +16,7 @@
#include "intel_dp_mst.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
+#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
@@ -78,7 +79,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (DISPLAY_VER(dev_priv) >= 9)
/* no global SR status; inspect per-plane WM */;
else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
+ sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv))
sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -124,9 +125,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_framebuffer *drm_fb;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
- fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
-
+ fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
+ if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
fbdev_fb->base.height,
@@ -474,8 +474,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
* reg for DC3CO debugging and validation,
* but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
*/
- seq_printf(m, "DC3CO count: %d\n",
- intel_de_read(dev_priv, DMC_DEBUG3));
+ seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ?
+ DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
} else {
dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
@@ -936,10 +936,9 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
}
if (crtc_state->bigjoiner)
- seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
- crtc_state->bigjoiner_linked_crtc->base.base.id,
- crtc_state->bigjoiner_linked_crtc->base.name,
- crtc_state->bigjoiner_slave ? "slave" : "master");
+ seq_printf(m, "\tLinked to 0x%x pipes as a %s\n",
+ crtc_state->bigjoiner_pipes,
+ intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master");
for_each_intel_encoder_mask(&dev_priv->drm, encoder,
crtc_state->uapi.encoder_mask)
@@ -1015,6 +1014,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0);
seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
pll->state.hw_state.mg_refclkin_ctl);
seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index d2102cc17bb4..9ebae7ac3235 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -16,6 +16,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 60e15226a8cb..b50d0e6efe21 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -26,7 +26,6 @@
#ifndef __INTEL_DISPLAY_TYPES_H__
#define __INTEL_DISPLAY_TYPES_H__
-#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/pm_qos.h>
#include <linux/pwm.h>
@@ -38,7 +37,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dsc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -145,25 +143,6 @@ struct intel_framebuffer {
struct i915_address_space *dpt_vm;
};
-struct intel_fbdev {
- struct drm_fb_helper helper;
- struct intel_framebuffer *fb;
- struct i915_vma *vma;
- unsigned long vma_flags;
- async_cookie_t cookie;
- int preferred_bpp;
-
- /* Whether or not fbdev hpd processing is temporarily suspended */
- bool hpd_suspended : 1;
- /* Set when a hotplug was received while HPD processing was
- * suspended
- */
- bool hpd_waiting : 1;
-
- /* Protects hpd_suspended */
- struct mutex hpd_lock;
-};
-
enum intel_hotplug_state {
INTEL_HOTPLUG_UNCHANGED,
INTEL_HOTPLUG_CHANGED,
@@ -1168,6 +1147,7 @@ struct intel_crtc_state {
/* bitmask of actually visible planes (enum plane_id) */
u8 active_planes;
+ u8 scaled_planes;
u8 nv12_planes;
u8 c8_planes;
@@ -1202,11 +1182,8 @@ struct intel_crtc_state {
/* enable pipe big joiner? */
bool bigjoiner;
- /* big joiner slave crtc? */
- bool bigjoiner_slave;
-
- /* linked crtc for bigjoiner, either slave or master */
- struct intel_crtc *bigjoiner_linked_crtc;
+ /* big joiner pipe bitmask */
+ u8 bigjoiner_pipes;
/* Display Stream compression state */
struct {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 146b83916005..1046e7fe310a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -886,9 +886,8 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_CLOCK_HIGH;
/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
- tmds_clock = target_clock;
- if (drm_mode_is_420_only(info, mode))
- tmds_clock /= 2;
+ tmds_clock = intel_hdmi_tmds_clock(target_clock, 8,
+ drm_mode_is_420_only(info, mode));
if (intel_dp->dfp.min_tmds_clock &&
tmds_clock < intel_dp->dfp.min_tmds_clock)
@@ -1139,21 +1138,12 @@ static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
intel_dp->dfp.ycbcr_444_to_420);
}
-static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state, int bpc)
-{
- int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
-
- if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
- clock /= 2;
-
- return clock;
-}
-
static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state, int bpc)
{
- int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc,
+ intel_dp_hdmi_ycbcr420(intel_dp, crtc_state));
if (intel_dp->dfp.min_tmds_clock &&
tmds_clock < intel_dp->dfp.min_tmds_clock)
@@ -3628,6 +3618,32 @@ update_status:
"Could not write test response to sink\n");
}
+static bool intel_dp_link_ok(struct intel_dp *intel_dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ bool uhbr = intel_dp->link_rate >= 1000000;
+ bool ok;
+
+ if (uhbr)
+ ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
+ intel_dp->lane_count);
+ else
+ ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+
+ if (ok)
+ return true;
+
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] %s link not ok, retraining\n",
+ encoder->base.base.id, encoder->base.name,
+ uhbr ? "128b/132b" : "8b/10b");
+
+ return false;
+}
+
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
@@ -3658,14 +3674,7 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
return false;
}
- if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
- drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s] channel EQ not ok, retraining\n",
- encoder->base.base.id, encoder->base.name);
- return false;
- }
-
- return true;
+ return intel_dp_link_ok(intel_dp, link_status);
}
/**
@@ -3779,8 +3788,8 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
intel_dp->lane_count))
return false;
- /* Retrain if Channel EQ or CR not ok */
- return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+ /* Retrain if link not ok */
+ return !intel_dp_link_ok(intel_dp, link_status);
}
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
@@ -3810,14 +3819,14 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp,
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
- u32 *crtc_mask)
+ u8 *pipe_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
- *crtc_mask = 0;
+ *pipe_mask = 0;
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
@@ -3851,12 +3860,12 @@ static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;
- *crtc_mask |= drm_crtc_mask(&crtc->base);
+ *pipe_mask |= BIT(crtc->pipe);
}
drm_connector_list_iter_end(&conn_iter);
if (!intel_dp_needs_link_retrain(intel_dp))
- *crtc_mask = 0;
+ *pipe_mask = 0;
return ret;
}
@@ -3875,7 +3884,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc;
- u32 crtc_mask;
+ u8 pipe_mask;
int ret;
if (!intel_dp_is_connected(intel_dp))
@@ -3886,17 +3895,17 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
+ ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask);
if (ret)
return ret;
- if (crtc_mask == 0)
+ if (pipe_mask == 0)
return 0;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
encoder->base.base.id, encoder->base.name);
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3907,7 +3916,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_crtc_pch_transcoder(crtc), false);
}
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3924,7 +3933,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
break;
}
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3942,14 +3951,14 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
- u32 *crtc_mask)
+ u8 *pipe_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
- *crtc_mask = 0;
+ *pipe_mask = 0;
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
@@ -3980,7 +3989,7 @@ static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;
- *crtc_mask |= drm_crtc_mask(&crtc->base);
+ *pipe_mask |= BIT(crtc->pipe);
}
drm_connector_list_iter_end(&conn_iter);
@@ -3993,7 +4002,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc;
- u32 crtc_mask;
+ u8 pipe_mask;
int ret;
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -4001,17 +4010,17 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
+ ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
if (ret)
return ret;
- if (crtc_mask == 0)
+ if (pipe_mask == 0)
return 0;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
encoder->base.base.id, encoder->base.name);
- for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
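Note on the crtc_mask -> pipe_mask change in intel_dp.c above: the retrain and PHY-test paths now collect the affected pipes in a u8 bitmask built with BIT(crtc->pipe) and walk it with for_each_intel_crtc_in_pipe_mask(). A minimal userspace sketch of the same bitmask pattern follows; it is not i915 code, and enum pipe, MAX_PIPES and the loop are illustrative stand-ins for the driver's own definitions.

/* Sketch only: collect pipes in a u8 mask and walk the set bits. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D, MAX_PIPES };

int main(void)
{
	uint8_t pipe_mask = 0;

	/* Collect the pipes that need a retrain, as the prep helper does. */
	pipe_mask |= BIT(PIPE_A);
	pipe_mask |= BIT(PIPE_C);

	/* Walk only the pipes present in the mask. */
	for (int pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(pipe_mask & BIT(pipe)))
			continue;
		printf("retraining pipe %c\n", 'A' + pipe);
	}
	return 0;
}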
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9451f336f28f..5d98773efd1b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -712,7 +712,7 @@ static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_
return false;
}
-static void
+void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
@@ -996,6 +996,23 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}
+static int
+intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 sink_status;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm, "Failed to read sink status\n");
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
+}
+
/**
* intel_dp_stop_link_train - stop link training
* @intel_dp: DP struct
@@ -1015,11 +1032,21 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
intel_dp->link_trained = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
+
+ if (intel_dp_is_uhbr(crtc_state) &&
+ wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] 128b/132b intra-hop not clearing\n",
+ encoder->base.base.id, encoder->base.name);
+ }
}
static bool
@@ -1083,8 +1110,6 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
bool ret = true;
int i;
- intel_dp_prepare_link_train(intel_dp, crtc_state);
-
for (i = lttpr_count - 1; i >= 0; i--) {
enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
@@ -1104,6 +1129,272 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
return ret;
}
+/*
+ * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
+ */
+static bool
+intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int delay_us;
+ int try, max_tries = 20;
+ unsigned long deadline;
+ bool timeout = false;
+
+ /*
+ * Reset signal levels. Start transmitting 128b/132b TPS1.
+ *
+ * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
+ * in DP_TRAINING_PATTERN_SET.
+ */
+ if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_1)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS1\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
+ /* Read the initial TX FFE settings. */
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read TX FFE presets\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Update signal levels and training set as requested. */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to set initial TX FFE settings\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Start transmitting 128b/132b TPS2. */
+ if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_2)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS2\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Time budget for the LANEx_EQ_DONE Sequence */
+ deadline = jiffies + msecs_to_jiffies_timeout(400);
+
+ for (try = 0; try < max_tries; try++) {
+ usleep_range(delay_us, 2 * delay_us);
+
+ /*
+ * The delay may get updated. The transmitter shall read the
+ * delay before link status during link training.
+ */
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Lane channel eq done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Lane channel eq timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ /* Update signal levels and training set as requested. */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to update TX FFE settings\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+ }
+
+ if (try == max_tries) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Max loop count reached\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ for (;;) {
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Interlane align done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Interlane align timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ usleep_range(2000, 3000);
+ }
+
+ return true;
+}
+
+/*
+ * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
+ */
+static bool
+intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ unsigned long deadline;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_2_CDS) != 1) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS2 CDS\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Time budget for the LANEx_CDS_DONE Sequence */
+ deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);
+
+ for (;;) {
+ bool timeout = false;
+
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ usleep_range(2000, 3000);
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
+ drm_dp_128b132b_cds_interlane_align_done(link_status) &&
+ drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] CDS interlane align done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] CDS timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+ }
+
+ /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
+ if (intel_dp->set_idle_link_train)
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
+
+ return true;
+}
+
+/*
+ * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
+ */
+static bool
+intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ bool passed = false;
+
+ if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] 128b/132b intra-hop not clear\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
+ intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
+ passed = true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] 128b/132b Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ passed ? "passed" : "failed",
+ crtc_state->port_clock, crtc_state->lane_count);
+
+ return passed;
+}
+
/**
* intel_dp_start_link_train - start link training
* @intel_dp: DP struct
@@ -1117,6 +1408,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ bool passed;
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
@@ -1127,6 +1419,13 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
/* Still continue with enabling the port and link training. */
lttpr_count = 0;
- if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
+ intel_dp_prepare_link_train(intel_dp, crtc_state);
+
+ if (intel_dp_is_uhbr(crtc_state))
+ passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
+ else
+ passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
+
+ if (!passed)
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
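The new 128b/132b sequences above (LANEx_EQ_DONE and LANEx_CDS_DONE) share one polling shape: take a time budget, mark it expired with a timeout flag, but still allow one final link-status check after the deadline before declaring failure. Below is a standalone sketch of that shape only; check_condition() is a hypothetical stand-in for reading DPCD link status and the 400 ms budget simply mirrors the EQ phase.

/* Sketch only: deadline polling with one last check after expiry. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool check_condition(void)
{
	static int calls;

	return ++calls >= 5;	/* pretend the sink needs a few polls */
}

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

int main(void)
{
	long deadline = now_ms() + 400;	/* time budget, as in the EQ phase */
	bool timeout = false;

	for (;;) {
		if (now_ms() > deadline)
			timeout = true;	/* allow one last check after the deadline */

		usleep(2000);

		if (check_condition()) {
			puts("done");
			return 0;
		}

		if (timeout) {
			puts("timed out");
			return 1;
		}
	}
}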
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index dbfb15705aaa..dc1556b46b85 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -29,6 +29,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
+
/* Get the TPSx symbol type of the value programmed to DP_TRAINING_PATTERN_SET */
static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6b6eab507d30..e30e698aa684 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -99,6 +99,29 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_topology_state *topology_state;
+ u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
+ DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
+
+ topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
+ if (IS_ERR(topology_state)) {
+ drm_dbg_kms(&i915->drm, "slot update failed\n");
+ return PTR_ERR(topology_state);
+ }
+
+ drm_dp_mst_update_slots(topology_state, link_coding_cap);
+
+ return 0;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -155,6 +178,10 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
+ ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
+ if (ret)
+ return ret;
+
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
@@ -357,6 +384,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1;
int ret;
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -366,7 +394,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
@@ -475,6 +503,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1;
int ret;
bool first_mst_stream;
@@ -509,7 +538,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links++;
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
/*
* Before Gen 12 this is not done as part of
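The MST changes above pick the starting slot from the channel coding: 128b/132b (UHBR) payloads start at slot 0, 8b/10b payloads keep starting at slot 1. A one-line sketch of that selection, with is_uhbr standing in for intel_dp_is_uhbr():

/* Sketch only: 128b/132b starts payload allocation at slot 0. */
int start_slot = is_uhbr ? 0 : 1;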
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 1ce0c171f4fb..14f5ffe27d05 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -16,6 +16,10 @@
#include "intel_snps_phy.h"
#include "vlv_sideband.h"
+struct intel_dpll_funcs {
+ int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+};
+
struct intel_limit {
struct {
int min, max;
@@ -1400,6 +1404,14 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
+int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ return i915->dpll_funcs->crtc_compute_clock(crtc_state);
+}
+
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
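intel_dpll.c above makes struct intel_dpll_funcs private to the file and routes callers through intel_dpll_crtc_compute_clock(), so other files never see the vtable itself. A small sketch of that private-vtable-plus-wrapper pattern, using illustrative names rather than the driver's types:

/* Sketch only: private function-pointer table behind a thin wrapper. */
#include <stdio.h>

struct crtc_state { int pixel_rate; };

/* Private vtable, as intel_dpll_funcs now is inside intel_dpll.c. */
struct dpll_funcs {
	int (*crtc_compute_clock)(struct crtc_state *crtc_state);
};

static int i8xx_compute_clock(struct crtc_state *crtc_state)
{
	printf("computing clock for pixel rate %d\n", crtc_state->pixel_rate);
	return 0;
}

static const struct dpll_funcs i8xx_dpll_funcs = {
	.crtc_compute_clock = i8xx_compute_clock,
};

static const struct dpll_funcs *dpll_funcs = &i8xx_dpll_funcs;

/* The only entry point other files would call. */
int dpll_crtc_compute_clock(struct crtc_state *crtc_state)
{
	return dpll_funcs->crtc_compute_clock(crtc_state);
}

int main(void)
{
	struct crtc_state state = { .pixel_rate = 148500 };

	return dpll_crtc_compute_clock(&state);
}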
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 1af0ac43cca4..69b06a9e473e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -15,6 +15,7 @@ struct intel_crtc_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 6723c3de5a80..569903d47aea 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2748,6 +2748,9 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
+
+ if (i915->vbt.override_afc_startup)
+ pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
}
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
@@ -2949,6 +2952,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+ if (dev_priv->vbt.override_afc_startup) {
+ u8 val = dev_priv->vbt.override_afc_startup_val;
+
+ pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
+ }
pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
@@ -3448,10 +3456,10 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
- hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
- DKL_PLL_DIV0_PROP_COEFF_MASK |
- DKL_PLL_DIV0_FBPREDIV_MASK |
- DKL_PLL_DIV0_FBDIV_INT_MASK);
+ val = DKL_PLL_DIV0_MASK;
+ if (dev_priv->vbt.override_afc_startup)
+ val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
+ hw_state->mg_pll_div0 &= val;
hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
@@ -3513,6 +3521,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR1(id));
+ if (dev_priv->vbt.override_afc_startup) {
+ hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
+ hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
+ }
} else {
if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
@@ -3554,7 +3566,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
- i915_reg_t cfgcr0_reg, cfgcr1_reg;
+ i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
if (IS_ALDERLAKE_S(dev_priv)) {
cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
@@ -3568,6 +3580,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
} else if (DISPLAY_VER(dev_priv) >= 12) {
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
+ div0_reg = TGL_DPLL0_DIV0(id);
} else {
if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
cfgcr0_reg = ICL_DPLL_CFGCR0(4);
@@ -3580,6 +3593,12 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+ drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
+ !i915_mmio_reg_valid(div0_reg));
+ if (dev_priv->vbt.override_afc_startup &&
+ i915_mmio_reg_valid(div0_reg))
+ intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
+ hw_state->div0);
intel_de_posting_read(dev_priv, cfgcr1_reg);
}
@@ -3667,13 +3686,11 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
val |= hw_state->mg_clktop2_hsclkctl;
intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
- val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
- val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
- DKL_PLL_DIV0_PROP_COEFF_MASK |
- DKL_PLL_DIV0_FBPREDIV_MASK |
- DKL_PLL_DIV0_FBDIV_INT_MASK);
- val |= hw_state->mg_pll_div0;
- intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
+ val = DKL_PLL_DIV0_MASK;
+ if (dev_priv->vbt.override_afc_startup)
+ val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
+ intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
+ hw_state->mg_pll_div0);
val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
@@ -3912,13 +3929,14 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
drm_dbg_kms(&dev_priv->drm,
- "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+ "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
"mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
"mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
"mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
"mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
"mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->div0,
hw_state->mg_refclkin_ctl,
hw_state->mg_clktop2_coreclkctl1,
hw_state->mg_clktop2_hsclkctl,
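The AFC-startup handling above folds an extra field into the DIV0 registers via intel_de_rmw(), widening the cleared mask only when the VBT override is set. A minimal sketch of that read-modify-write follows; the register is modelled as a plain variable and the mask values are chosen purely for illustration.

/* Sketch only: clear a mask, then OR in the new field values. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xdeadbeef;

static void rmw(uint32_t clear, uint32_t set)
{
	fake_reg = (fake_reg & ~clear) | set;
}

int main(void)
{
	uint32_t div0_mask = 0x00ffffff;	/* illustrative base field mask */
	uint32_t afc_mask  = 0x0e000000;	/* illustrative AFC startup field */
	uint32_t new_div0  = 0x00123456 | 0x04000000;
	int override_afc_startup = 1;

	/* Widen the cleared field only when the VBT override is in effect. */
	uint32_t clear = div0_mask | (override_afc_startup ? afc_mask : 0);

	rmw(clear, new_div0);
	printf("DIV0 = 0x%08x\n", fake_reg);
	return 0;
}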
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 91fe181462b2..ba2fdfce1579 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -208,6 +208,9 @@ struct intel_dpll_hw_state {
/* icl */
u32 cfgcr0;
+ /* tgl */
+ u32 div0;
+
/* bxt */
u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 8f674745e7e0..2b8e89477f48 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -3,11 +3,13 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gem/i915_gem_domain.h"
+#include "gt/gen8_ppgtt.h"
+
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
-#include "gt/gen8_ppgtt.h"
struct i915_dpt {
struct i915_address_space vm;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 83a69a4a4fea..b34a67309976 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,6 +4,8 @@
*
*/
+#include "gem/i915_gem_internal.h"
+
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 31c15e5fca95..e60046d90124 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -7,6 +7,7 @@
* DOC: display pinning helpers
*/
+#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc3a81be9f7..837484cbcd33 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -50,6 +50,23 @@
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer *fb;
+ struct i915_vma *vma;
+ unsigned long vma_flags;
+ async_cookie_t cookie;
+ int preferred_bpp;
+
+ /* Whether or not fbdev hpd processing is temporarily suspended */
+ bool hpd_suspended: 1;
+ /* Set when a hotplug was received while HPD processing was suspended */
+ bool hpd_waiting: 1;
+
+ /* Protects hpd_suspended */
+ struct mutex hpd_lock;
+};
+
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
return ifbdev->fb->frontbuffer;
@@ -680,3 +697,11 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
intel_fbdev_invalidate(ifbdev);
}
+
+struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+{
+ if (!fbdev || !fbdev->helper.fb)
+ return NULL;
+
+ return to_intel_framebuffer(fbdev->helper.fb);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index de7c84250eb5..0e95e9472fa3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -10,6 +10,8 @@
struct drm_device;
struct drm_i915_private;
+struct intel_fbdev;
+struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
@@ -19,6 +21,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
void intel_fbdev_restore_mode(struct drm_device *dev);
+struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
{
@@ -48,6 +51,10 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
+static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+{
+ return NULL;
+}
#endif
#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 3d6e22923601..4e4b43669b14 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -10,6 +10,11 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
+struct intel_fdi_funcs {
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+};
+
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 6c72f8587240..1aa5bdc7b0dc 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1869,7 +1869,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_OK;
}
-static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
+int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
{
/* YCBCR420 TMDS rate requirement is half the pixel clock */
if (ycbcr420_output)
@@ -1935,25 +1935,30 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- enum drm_mode_status status;
+ enum drm_mode_status status = MODE_OK;
+ int bpc;
+
+ /*
+ * Try all color depths since valid port clock range
+ * can have holes. Any mode that can be used with at
+ * least one color depth is accepted.
+ */
+ for (bpc = 12; bpc >= 8; bpc -= 2) {
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ if (!intel_hdmi_source_bpc_possible(i915, bpc))
+ continue;
+
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ continue;
+
+ status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
+ if (status == MODE_OK)
+ return MODE_OK;
+ }
- /* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output),
- true, has_hdmi_sink);
-
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (status != MODE_OK &&
- intel_hdmi_source_bpc_possible(i915, 12) &&
- intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output),
- true, has_hdmi_sink);
-
- /* if we can't do 8,12bpc we may still be able to do 10bpc */
- if (status != MODE_OK &&
- intel_hdmi_source_bpc_possible(i915, 10) &&
- intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output),
- true, has_hdmi_sink);
+ /* can never happen */
+ drm_WARN_ON(&i915->drm, status == MODE_OK);
return status;
}
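intel_hdmi_mode_clock_valid() above replaces the hand-written 8/12/10 bpc checks with a loop from the deepest to the shallowest colour depth, accepting the mode as soon as one colour depth yields a valid TMDS clock. A sketch of that flow; port_clock_valid() and its 594 MHz bound are assumptions standing in for the real source/sink/port checks.

/* Sketch only: try 12, 10, 8 bpc and accept the first valid TMDS clock. */
#include <stdbool.h>
#include <stdio.h>

static int tmds_clock(int clock, int bpc, bool ycbcr420_output)
{
	if (ycbcr420_output)
		clock /= 2;		/* YCbCr 4:2:0 needs half the pixel clock */

	/* 10 and 12 bpc scale the TMDS character rate by bpc/8. */
	if (bpc > 8)
		clock = clock * bpc / 8;

	return clock;
}

static bool port_clock_valid(int tmds)
{
	return tmds <= 594000;		/* illustrative upper bound only */
}

int main(void)
{
	int clock = 593407;		/* 4k60-ish pixel clock, kHz */

	for (int bpc = 12; bpc >= 8; bpc -= 2) {
		int tmds = tmds_clock(clock, bpc, true);

		if (port_clock_valid(tmds)) {
			printf("mode ok at %d bpc (tmds %d kHz)\n", bpc, tmds);
			return 0;
		}
	}

	puts("mode rejected at every colour depth");
	return 1;
}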
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index b0804b862a89..93f65a917c36 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -46,6 +46,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
int bpc, bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output);
int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
int num_slices, int output_format, bool hdmi_all_bpp,
int hdmi_max_chunk_bytes);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 912b7003dcfa..8204126d17f9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
+#include "i915_irq.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -213,12 +214,6 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
}
}
-static void intel_hpd_irq_setup(struct drm_i915_private *i915)
-{
- if (i915->display_irqs_enabled && i915->hotplug_funcs)
- i915->hotplug_funcs->hpd_irq_setup(i915);
-}
-
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index ad1afe9df6c3..f31e8c3f8ce0 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -47,10 +47,11 @@
#define OPREGION_ASLE_EXT_OFFSET 0x1C00
#define OPREGION_SIGNATURE "IntelGraphicsMem"
-#define MBOX_ACPI (1<<0)
-#define MBOX_SWSCI (1<<1)
-#define MBOX_ASLE (1<<2)
-#define MBOX_ASLE_EXT (1<<4)
+#define MBOX_ACPI BIT(0) /* Mailbox #1 */
+#define MBOX_SWSCI BIT(1) /* Mailbox #2 (obsolete from v2.x) */
+#define MBOX_ASLE BIT(2) /* Mailbox #3 */
+#define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */
+#define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */
struct opregion_header {
u8 signature[16];
@@ -245,14 +246,10 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
-static int swsci(struct drm_i915_private *dev_priv,
- u32 function, u32 parm, u32 *parm_out)
+static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = dev_priv->opregion.swsci;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- u32 main_function, sub_function, scic;
- u16 swsci_val;
- u32 dslp;
+ struct opregion_swsci *swsci = i915->opregion.swsci;
+ u32 main_function, sub_function;
if (!swsci)
return -ENODEV;
@@ -264,15 +261,31 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+ if ((i915->opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((dev_priv->opregion.swsci_gbda_sub_functions &
+ if ((i915->opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
+ return 0;
+}
+
+static int swsci(struct drm_i915_private *dev_priv,
+ u32 function, u32 parm, u32 *parm_out)
+{
+ struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u32 scic, dslp;
+ u16 swsci_val;
+ int ret;
+
+ ret = check_swsci_function(dev_priv, function);
+ if (ret)
+ return ret;
+
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
if (!dslp) {
@@ -346,11 +359,17 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
u32 parm = 0;
u32 type = 0;
u32 port;
+ int ret;
/* don't care about old stuff for now */
if (!HAS_DDI(dev_priv))
return 0;
+ /* Avoid port out of bounds checks if SWSCI isn't there. */
+ ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE);
+ if (ret)
+ return ret;
+
if (intel_encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
@@ -914,9 +933,17 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
}
if (mboxes & MBOX_SWSCI) {
- drm_dbg(&dev_priv->drm, "SWSCI supported\n");
- opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev_priv);
+ u8 major = opregion->header->over.major;
+
+ if (major >= 3) {
+ drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
+ } else {
+ if (major >= 2)
+ drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
+ drm_dbg(&dev_priv->drm, "SWSCI supported\n");
+ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ swsci_setup(dev_priv);
+ }
}
if (mboxes & MBOX_ASLE) {
@@ -931,6 +958,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET;
}
+ if (mboxes & MBOX_BACKLIGHT) {
+ drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
+ }
+
if (intel_load_vbt_firmware(dev_priv) == 0)
goto out;
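The OpRegion change above distinguishes Mailbox #2's meaning by version: SWSCI on v2.x and earlier, backlight from v3.x, with an error when SWSCI is advertised on a v3.x OpRegion. A tiny sketch of that gating using the MBOX_* bits; the over_major variable is an assumption standing in for the header's version field.

/* Sketch only: mailbox bits plus OpRegion major-version gating. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define MBOX_ACPI	BIT(0)
#define MBOX_SWSCI	BIT(1)
#define MBOX_ASLE	BIT(2)
#define MBOX_BACKLIGHT	BIT(5)

int main(void)
{
	uint32_t mboxes = MBOX_ACPI | MBOX_SWSCI | MBOX_ASLE;
	uint8_t over_major = 3;		/* OpRegion version major */

	if (mboxes & MBOX_SWSCI) {
		if (over_major >= 3)
			puts("SWSCI mailbox present on v3.x, ignoring");
		else
			puts("SWSCI supported");
	}

	if (mboxes & MBOX_BACKLIGHT)
		puts("backlight mailbox present");

	return 0;
}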
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 5358f03b52db..76845d34ad0c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -28,6 +28,7 @@
#include <drm/drm_fourcc.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 01ce1d72297f..f2fbb196d62a 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -165,8 +165,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -203,11 +201,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* pretend the BIOS never had it enabled.
*/
intel_plane_disable_noatomic(crtc, plane);
- if (crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- crtc_state->bigjoiner_linked_crtc;
- intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
- }
return;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index a1a663f362e7..2e0b092f4b6b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1063,31 +1063,28 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static void intel_psr_enable_source(struct intel_dp *intel_dp)
+static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
+{
+ switch (intel_dp->psr.pipe) {
+ case PIPE_A:
+ return LATENCY_REPORTING_REMOVED_PIPE_A;
+ case PIPE_B:
+ return LATENCY_REPORTING_REMOVED_PIPE_B;
+ case PIPE_C:
+ return LATENCY_REPORTING_REMOVED_PIPE_C;
+ default:
+ MISSING_CASE(intel_dp->psr.pipe);
+ return 0;
+ }
+}
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
- if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
- i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
- u32 chicken = intel_de_read(dev_priv, reg);
-
- chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
- PSR2_ADD_VERTICAL_LINE_COUNT;
- intel_de_write(dev_priv, reg, chicken);
- }
-
- /*
- * Wa_16014451276:adlp
- * All supported adlp panels have 1-based X granularity, this may
- * cause issues if non-supported panels are used.
- */
- if (IS_ALDERLAKE_P(dev_priv) &&
- intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
-
/*
* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
* mask LPSP to avoid dependency on other drivers that might block
@@ -1126,18 +1123,47 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
- intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK,
- TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+ if (intel_dp->psr.psr2_enabled) {
+ if (DISPLAY_VER(dev_priv) == 9)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ PSR2_VSC_ENABLE_PROG_HEADER |
+ PSR2_ADD_VERTICAL_LINE_COUNT);
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
- CLKGATE_DIS_MISC_DMASC_GATING_DIS);
+ /*
+ * Wa_16014451276:adlp
+ * All supported adlp panels have 1-based X granularity, this may
+ * cause issues if non-supported panels are used.
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+
+ /* Wa_16011168373:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK,
+ TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS);
+
+ /* Wa_16013835468:tgl[b0+], dg1 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
+ IS_DG1(dev_priv)) {
+ u16 vtotal, vblank;
+
+ vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
+ crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
+ crtc_state->uapi.adjusted_mode.crtc_vblank_start;
+ if (vblank > vtotal)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
+ wa_16013835468_bit_get(intel_dp));
+ }
+ }
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1202,7 +1228,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
- intel_psr_enable_source(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
intel_dp->psr.paused = false;
@@ -1290,17 +1316,24 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
- intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
- CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+ if (intel_dp->psr.psr2_enabled) {
+ /* Wa_16011168373:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+
+ /* Wa_16013835468:tgl[b0+], dg1 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
+ IS_DG1(dev_priv))
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
+ }
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
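Wa_16013835468 above is applied only when the vertical blank, derived from the adjusted mode timings, exceeds crtc_vtotal - crtc_vdisplay. A sketch of just that condition; struct mode stands in for the relevant drm_display_mode fields and the timings are made up for illustration.

/* Sketch only: decide whether the vblank-length workaround applies. */
#include <stdbool.h>
#include <stdio.h>

struct mode {
	int crtc_vdisplay, crtc_vtotal;
	int crtc_vblank_start, crtc_vblank_end;
};

static bool needs_wa_16013835468(const struct mode *m)
{
	/* Mirrors the driver's naming: "vtotal" here is vtotal - vdisplay. */
	int vtotal = m->crtc_vtotal - m->crtc_vdisplay;
	int vblank = m->crtc_vblank_end - m->crtc_vblank_start;

	return vblank > vtotal;
}

int main(void)
{
	struct mode m = {
		.crtc_vdisplay = 2160, .crtc_vtotal = 2250,
		.crtc_vblank_start = 2160, .crtc_vblank_end = 2260,
	};

	printf("apply workaround: %s\n", needs_wa_16013835468(&m) ? "yes" : "no");
	return 0;
}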
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 8573a458811a..8fd00de981fc 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -35,7 +35,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
if (intel_de_wait_for_clear(i915, ICL_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
drm_err(&i915->drm, "SNPS PHY %c failed to calibrate after 25ms.\n",
- phy);
+ phy_name(phy));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index feead08ddf8f..fc037c027ea5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -693,6 +693,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
@@ -704,12 +706,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- if (active_links) {
- enum intel_display_power_domain domain;
- intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ tc_cold_wref = tc_cold_block(dig_port, &domain);
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
@@ -718,10 +719,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
&dig_port->tc_lock_power_domain);
-
- tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ } else {
+ /*
+ * TBT-alt is the default mode whenever PHY ownership is not
+ * held (regardless of the sink's live connection state), so
+ * just switch from it to disconnected mode here without
+ * logging a note.
+ */
+ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+ icl_tc_phy_disconnect(dig_port);
}
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a39d6cfea87a..b9397d9363c5 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -162,6 +162,14 @@ struct bdb_general_features {
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 dp_ssc_dongle_supported:1;
u8 rsvd11:2; /* finish byte */
+
+ /* bits 6 */
+ u8 tc_hpd_retry_timeout:7; /* 242 */
+ u8 rsvd12:1;
+
+ /* bits 7 */
+ u8 afc_startup_config:2;/* 249 */
+ u8 rsvd13:6;
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3faea903b9ae..545eff5bf158 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -1107,18 +1107,6 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran
ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2;
}
-struct intel_crtc *
-intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc)
-{
- return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1);
-}
-
-static struct intel_crtc *
-intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc)
-{
- return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1);
-}
-
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1126,7 +1114,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
u32 dss_ctl1_val = 0;
if (crtc_state->bigjoiner && !crtc_state->dsc.compression_enable) {
- if (crtc_state->bigjoiner_slave)
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE;
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER;
@@ -1154,7 +1142,7 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->bigjoiner) {
dss_ctl1_val |= BIG_JOINER_ENABLE;
- if (!crtc_state->bigjoiner_slave)
+ if (!intel_crtc_is_bigjoiner_slave(crtc_state))
dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE;
}
intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
@@ -1174,25 +1162,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
}
}
-void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dss_ctl1;
-
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder));
- if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) {
- crtc_state->bigjoiner = true;
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
- drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
- } else if (dss_ctl1 & UNCOMPRESSED_JOINER_SLAVE) {
- crtc_state->bigjoiner = true;
- crtc_state->bigjoiner_slave = true;
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
- drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
- }
-}
-
void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1223,18 +1192,6 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
(dss_ctl1 & JOINER_ENABLE);
- if (dss_ctl1 & BIG_JOINER_ENABLE) {
- crtc_state->bigjoiner = true;
-
- if (!(dss_ctl1 & MASTER_BIG_JOINER_ENABLE)) {
- crtc_state->bigjoiner_slave = true;
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
- } else {
- crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
- }
- drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
- }
-
/* FIXME: add more state readout as needed */
/* PPS1 */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 4ec75f715986..8763f00fa7e2 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -18,7 +18,6 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
-void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 8a248003dfae..ce91b23385cf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -4,6 +4,8 @@
* Copyright © 2016 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
#include "display/intel_frontbuffer.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 00327b750fbb..2958e2be4292 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,6 +67,7 @@
#include <linux/log2.h>
#include <linux/nospec.h>
+#include <drm/drm_cache.h>
#include <drm/drm_syncobj.h>
#include "gt/gen6_ppgtt.h"
@@ -79,6 +80,7 @@
#include "pxp/intel_pxp.h"
+#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -343,6 +345,20 @@ static int proto_context_register(struct drm_i915_file_private *fpriv,
return ret;
}
+static struct i915_address_space *
+i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct i915_address_space *vm;
+
+ xa_lock(&file_priv->vm_xa);
+ vm = xa_load(&file_priv->vm_xa, id);
+ if (vm)
+ kref_get(&vm->ref);
+ xa_unlock(&file_priv->vm_xa);
+
+ return vm;
+}
+
static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 9402d4bf4ffc..c6eb023d3d86 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -3,12 +3,15 @@
* Copyright © 2020 Intel Corporation
*/
+#include <drm/drm_fourcc.h>
+
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.h b/drivers/gpu/drm/i915/gem/i915_gem_create.h
new file mode 100644
index 000000000000..9536aa906001
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CREATE_H__
+#define __I915_GEM_CREATE_H__
+
+struct drm_file;
+struct drm_device;
+struct drm_mode_create_dumb;
+
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+#endif /* __I915_GEM_CREATE_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 14fdb0796c52..13917231ae81 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -11,6 +11,7 @@
#include <asm/smp.h>
+#include "gem/i915_gem_dmabuf.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h
new file mode 100644
index 000000000000..6e0405d47ce1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_GEM_DMABUF_H__
+#define __I915_GEM_DMABUF_H__
+
+struct drm_gem_object;
+struct drm_device;
+struct dma_buf;
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
+
+#endif /* __I915_GEM_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 26532c07d467..3e5d6057b3ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -9,12 +9,13 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
+#include "i915_gem_domain.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
-#include "i915_gem_object.h"
-#include "i915_vma.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+#include "i915_gem_object.h"
+#include "i915_vma.h"
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.h b/drivers/gpu/drm/i915/gem/i915_gem_domain.h
new file mode 100644
index 000000000000..9622df962bfc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_GEM_DOMAIN_H__
+#define __I915_GEM_DOMAIN_H__
+
+struct drm_i915_gem_object;
+enum i915_cache_level;
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
+#endif /* __I915_GEM_DOMAIN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 54cae821513b..0a4ecf71ee08 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,6 +25,7 @@
#include "i915_cmd_parser.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_evict.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index c5150a1ee3d2..c698f95af15f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "i915_gem.h"
+#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.h b/drivers/gpu/drm/i915/gem/i915_gem_internal.h
new file mode 100644
index 000000000000..6664e06112fc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_GEM_INTERNAL_H__
+#define __I915_GEM_INTERNAL_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct drm_i915_gem_object_ops;
+struct drm_i915_private;
+
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ phys_addr_t size);
+struct drm_i915_gem_object *
+__i915_gem_object_create_internal(struct drm_i915_private *i915,
+ const struct drm_i915_gem_object_ops *ops,
+ phys_addr_t size);
+
+#endif /* __I915_GEM_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 1478c02a82cb..a3b60e1ede90 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -9,6 +9,8 @@
#include <linux/pfn_t.h>
#include <linux/sizes.h>
+#include <drm/drm_cache.h>
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d87b508b59b1..9a478c19c477 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -24,11 +24,16 @@
#include <linux/sched/mm.h>
+#include <drm/drm_cache.h>
+
#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"
+
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index a50f884973bc..022582a1ca17 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -4,6 +4,8 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index ac56124760e1..e81e5ac8d376 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -10,6 +10,7 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index cc9fe258fba7..60f962f0c041 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -5,8 +5,11 @@
*/
#include <linux/pagevec.h>
+#include <linux/shmem_fs.h>
#include <linux/swap.h>
+#include <drm/drm_cache.h>
+
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 4dfed34191c6..0272b24c5c57 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -16,6 +16,7 @@
#include "i915_gem_stolen.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
+#include "intel_mchbar_regs.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 75501db71041..af85d0c28168 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -9,6 +9,7 @@
#include <drm/drm_file.h>
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1f880c8c66e7..68f758ebbd4b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/shmem_fs.h>
+
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 11f0aa65f8a3..6af237aa1854 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -8,9 +8,10 @@
#include "i915_selftest.h"
-#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7d327ffd0464..85bf9ba727d7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -6,6 +6,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index c6291429b00c..65e37d3cbb97 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,11 +6,13 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gem/i915_gem_region.h"
+
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index b35c1219c852..3c55e77b0f1b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -7,6 +7,7 @@
#include "igt_gem_utils.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index c0a8ef368044..6d6082b5f31f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include "i915_file_private.h"
#include "mock_context.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gtt.h"
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 8471758b3ef4..0bd0d611e0c8 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
+#include "gem/i915_gem_internal.h"
+
#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index e86d8255feec..ece16c2b5b8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,6 +9,7 @@
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 0e353d8c2bc8..be4b1e65442f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -182,6 +182,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define ICL_HWS_CSB_WRITE_INDEX 0x2f
+#define INTEL_HWS_CSB_WRITE_INDEX(__i915) \
+ (GRAPHICS_VER(__i915) >= 11 ? ICL_HWS_CSB_WRITE_INDEX : I915_HWS_CSB_WRITE_INDEX)
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 84ad09fb9b8b..959e9300ac9e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -6,6 +6,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
@@ -1229,17 +1230,6 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
- }
-}
-
static u32
read_subslice_reg(const struct intel_engine_cs *engine,
int slice, int subslice, i915_reg_t reg)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index e9fec6214073..0bf8b45c9319 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -70,6 +70,12 @@
#define RING_NOPID(base) _MMIO((base) + 0x94)
#define RING_HWSTAM(base) _MMIO((base) + 0x98)
#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
+#define ASYNC_FLIP_PERF_DISABLE REG_BIT(14)
+#define MI_FLUSH_ENABLE REG_BIT(12)
+#define TGL_NESTED_BB_EN REG_BIT(12)
+#define MODE_IDLE REG_BIT(9)
+#define STOP_RING REG_BIT(8)
+#define VS_TIMER_DISPATCH REG_BIT(6)
#define RING_IMR(base) _MMIO((base) + 0xa8)
#define RING_EIR(base) _MMIO((base) + 0xb0)
#define RING_EMR(base) _MMIO((base) + 0xb4)
@@ -211,8 +217,25 @@
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+#define GEN11_VCS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x88c)
+#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(base) _MMIO((base) + 0x890)
+#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x201c)
+#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(base) _MMIO((base) + 0x2018)
+#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
+#define GEN11_VECS_SFC_USAGE(base) _MMIO((base) + 0x2014)
+#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+#define GEN12_HCP_SFC_LOCK_STATUS(base) _MMIO((base) + 0x2914)
+#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
+#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
+
#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 49e26d23ce1f..4a9ef688fac2 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3465,7 +3465,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+ &engine->status_page.addr[INTEL_HWS_CSB_WRITE_INDEX(i915)];
if (GRAPHICS_VER(i915) < 11)
execlists->csb_size = GEN8_CSB_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 95c9145232f5..f294753bc947 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -9,6 +9,7 @@
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt_regs.h"
+#include "intel_mchbar_regs.h"
/**
* DOC: fence register handling
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 57763aede976..545a2b1f1834 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -5,15 +5,17 @@
#include <drm/intel-gtt.h>
-#include "intel_gt_debugfs.h"
-
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
@@ -25,7 +27,6 @@
#include "intel_rps.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-#include "pxp/intel_pxp.h"
void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 9db3dcbd917f..cadfd85785b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -3,6 +3,7 @@
* Copyright © 2014-2018 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 4e448c13a64c..37765919fe32 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -15,6 +15,7 @@
#include "intel_gt_pm_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_llc.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 16d98ebee687..e8143fa4b5a8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -8,779 +8,166 @@
#include "i915_reg_defs.h"
-#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define ILK_GRDOM_FULL (0 << 1)
-#define ILK_GRDOM_RENDER (1 << 1)
-#define ILK_GRDOM_MEDIA (3 << 1)
-#define ILK_GRDOM_MASK (3 << 1)
-#define ILK_GRDOM_RESET_ENABLE (1 << 0)
-
-#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
-#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3 << 21)
-#define GEN6_MBC_SNPCR_MAX (0 << 21)
-#define GEN6_MBC_SNPCR_MED (1 << 21)
-#define GEN6_MBC_SNPCR_LOW (2 << 21)
-#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
-
-#define VLV_G3DCTL _MMIO(0x9024)
-#define VLV_GSCKGCTL _MMIO(0x9028)
-
-#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN REG_BIT(30)
-
-#define GEN6_MBCTL _MMIO(0x0907c)
-#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
-#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
-#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
-#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
-#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
-
-#define GEN6_GDRST _MMIO(0x941c)
-#define GEN6_GRDOM_FULL (1 << 0)
-#define GEN6_GRDOM_RENDER (1 << 1)
-#define GEN6_GRDOM_MEDIA (1 << 2)
-#define GEN6_GRDOM_BLT (1 << 3)
-#define GEN6_GRDOM_VECS (1 << 4)
-#define GEN9_GRDOM_GUC (1 << 5)
-#define GEN8_GRDOM_MEDIA2 (1 << 7)
-/* GEN11 changed all bit defs except for FULL & RENDER */
-#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
-#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
-#define GEN11_GRDOM_BLT (1 << 2)
-#define GEN11_GRDOM_GUC (1 << 3)
-#define GEN11_GRDOM_MEDIA (1 << 5)
-#define GEN11_GRDOM_MEDIA2 (1 << 6)
-#define GEN11_GRDOM_MEDIA3 (1 << 7)
-#define GEN11_GRDOM_MEDIA4 (1 << 8)
-#define GEN11_GRDOM_MEDIA5 (1 << 9)
-#define GEN11_GRDOM_MEDIA6 (1 << 10)
-#define GEN11_GRDOM_MEDIA7 (1 << 11)
-#define GEN11_GRDOM_MEDIA8 (1 << 12)
-#define GEN11_GRDOM_VECS (1 << 13)
-#define GEN11_GRDOM_VECS2 (1 << 14)
-#define GEN11_GRDOM_VECS3 (1 << 15)
-#define GEN11_GRDOM_VECS4 (1 << 16)
-#define GEN11_GRDOM_SFC0 (1 << 17)
-#define GEN11_GRDOM_SFC1 (1 << 18)
-#define GEN11_GRDOM_SFC2 (1 << 19)
-#define GEN11_GRDOM_SFC3 (1 << 20)
-#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
-#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
-
-#define GEN11_VCS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x88C)
-#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
-#define GEN11_VCS_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x890)
-#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
-#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
-
-#define GEN11_VECS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x201C)
-#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
-#define GEN11_VECS_SFC_LOCK_ACK(engine) _MMIO((engine)->mmio_base + 0x2018)
-#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
-#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
-#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
-
-#define GEN12_HCP_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x2910)
-#define GEN12_HCP_SFC_FORCED_LOCK_BIT REG_BIT(0)
-#define GEN12_HCP_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x2914)
-#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
-#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
-
-#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
-
-#define WAIT_FOR_RC6_EXIT _MMIO(0x20CC)
-/* HSW only */
-#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2
-#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SLECTIVE_READ_ADDRESSING_SHIFT)
-#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4
-#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
-/* HSW+ */
-#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0)
-#define HSW_RCS_CONTEXT_ENABLE (1 << 7)
-#define HSW_RCS_INHIBIT (1 << 8)
-/* Gen8 */
-#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
-#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
-#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
-#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
-#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6)
-#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9
-#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
-#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11
-#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
-#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
-
-#define GAM_ECOCHK _MMIO(0x4090)
-#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
-#define ECOCHK_SNB_BIT (1 << 10)
-#define ECOCHK_DIS_TLB (1 << 8)
-#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
-#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
-#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
-#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
-#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
-#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
-#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
-#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
-
-#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
-
-#define GAC_ECO_BITS _MMIO(0x14090)
-#define ECOBITS_SNB_BIT (1 << 13)
-#define ECOBITS_PPGTT_CACHE64B (3 << 8)
-#define ECOBITS_PPGTT_CACHE4B (0 << 8)
-
-#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
-#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
-#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
-
-#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
-#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
-#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
-#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
-
-#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
-#define RENDER_MOD_CTRL _MMIO(0xcf2c)
-#define COMP_MOD_CTRL _MMIO(0xcf30)
-#define VDBX_MOD_CTRL _MMIO(0xcf34)
-#define VEBX_MOD_CTRL _MMIO(0xcf38)
-#define FORCE_MISS_FTLB REG_BIT(3)
-
-#define GAB_CTL _MMIO(0x24000)
-#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
-
-#define HSW_MI_PREDICATE_RESULT_2 _MMIO(0x2214)
-
-/*
- * Registers used only by the command parser
- */
-#define BCS_SWCTRL _MMIO(0x22200)
-#define BCS_SRC_Y REG_BIT(0)
-#define BCS_DST_Y REG_BIT(1)
-
-#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
-#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
-#define HS_INVOCATION_COUNT _MMIO(0x2300)
-#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
-#define DS_INVOCATION_COUNT _MMIO(0x2308)
-#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
-#define IA_VERTICES_COUNT _MMIO(0x2310)
-#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
-#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
-#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
-#define VS_INVOCATION_COUNT _MMIO(0x2320)
-#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
-#define GS_INVOCATION_COUNT _MMIO(0x2328)
-#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
-#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
-#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
-#define CL_INVOCATION_COUNT _MMIO(0x2338)
-#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
-#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
-#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
-#define PS_INVOCATION_COUNT _MMIO(0x2348)
-#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
-#define PS_DEPTH_COUNT _MMIO(0x2350)
-#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
-
-/* There are the 4 64-bit counter registers, one for each stream output */
-#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
-#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
-
-#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
-#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
-
-#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
-#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
-#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
-#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
-#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C)
-#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
-
-#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
-#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
-#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
-
-#define GEN12_SQCM _MMIO(0x8724)
-#define EN_32B_ACCESS REG_BIT(30)
-
-/*
- * Flexible, Aggregate EU Counter Registers.
- * Note: these aren't contiguous
- */
-#define EU_PERF_CNTL0 _MMIO(0xe458)
-#define EU_PERF_CNTL1 _MMIO(0xe558)
-#define EU_PERF_CNTL2 _MMIO(0xe658)
-#define EU_PERF_CNTL3 _MMIO(0xe758)
-#define EU_PERF_CNTL4 _MMIO(0xe45c)
-#define EU_PERF_CNTL5 _MMIO(0xe55c)
-#define EU_PERF_CNTL6 _MMIO(0xe65c)
-
-#define RT_CTRL _MMIO(0xe530)
-#define DIS_NULL_QUERY REG_BIT(10)
-
/* RPM unit config (Gen8+) */
-#define RPM_CONFIG0 _MMIO(0x0D00)
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
-#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
-#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
-
-#define RPM_CONFIG1 _MMIO(0x0D04)
-#define GEN10_GT_NOA_ENABLE (1 << 9)
-
-/* GPM unit config (Gen9+) */
-#define CTC_MODE _MMIO(0xA26C)
-#define CTC_SOURCE_PARAMETER_MASK 1
-#define CTC_SOURCE_CRYSTAL_CLOCK 0
-#define CTC_SOURCE_DIVIDE_LOGIC 1
-#define CTC_SHIFT_PARAMETER_SHIFT 1
-#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+#define RPM_CONFIG0 _MMIO(0xd00)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
+
+#define RPM_CONFIG1 _MMIO(0xd04)
+#define GEN10_GT_NOA_ENABLE (1 << 9)
/* RCP unit config (Gen8+) */
-#define RCP_CONFIG _MMIO(0x0D08)
-
-#define MICRO_BP0_0 _MMIO(0x9800)
-#define MICRO_BP0_2 _MMIO(0x9804)
-#define MICRO_BP0_1 _MMIO(0x9808)
-
-#define MICRO_BP1_0 _MMIO(0x980C)
-#define MICRO_BP1_2 _MMIO(0x9810)
-#define MICRO_BP1_1 _MMIO(0x9814)
-
-#define MICRO_BP2_0 _MMIO(0x9818)
-#define MICRO_BP2_2 _MMIO(0x981C)
-#define MICRO_BP2_1 _MMIO(0x9820)
-
-#define MICRO_BP3_0 _MMIO(0x9824)
-#define MICRO_BP3_2 _MMIO(0x9828)
-#define MICRO_BP3_1 _MMIO(0x982C)
-
-#define MICRO_BP_TRIGGER _MMIO(0x9830)
-#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
-#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
-#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
-
-#define GAMTARBMODE _MMIO(0x04a08)
-#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
-#define ARB_MODE_SWIZZLE_BDW (1 << 1)
-#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-
-#define _RING_FAULT_REG_RCS 0x4094
-#define _RING_FAULT_REG_VCS 0x4194
-#define _RING_FAULT_REG_BCS 0x4294
-#define _RING_FAULT_REG_VECS 0x4394
-#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
- _RING_FAULT_REG_RCS, \
- _RING_FAULT_REG_VCS, \
- _RING_FAULT_REG_VECS, \
- _RING_FAULT_REG_BCS))
-#define GEN8_RING_FAULT_REG _MMIO(0x4094)
-#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
-#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1 << 11)
-#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
-#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1 << 0)
-#define DONE_REG _MMIO(0x40b0)
-#define GEN12_GAM_DONE _MMIO(0xcf68)
-#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
-#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
-#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
-#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
-#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
-#define GEN12_VD0_AUX_NV _MMIO(0x4218)
-#define GEN12_VD1_AUX_NV _MMIO(0x4228)
-#define GEN12_VD2_AUX_NV _MMIO(0x4298)
-#define GEN12_VD3_AUX_NV _MMIO(0x42A8)
-#define GEN12_VE0_AUX_NV _MMIO(0x4238)
-#define GEN12_VE1_AUX_NV _MMIO(0x42B8)
-#define AUX_INV REG_BIT(0)
-#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
-#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
-
-#define MISC_STATUS0 _MMIO(0xA500)
-#define MISC_STATUS1 _MMIO(0xA504)
-
-#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
-
-#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
-
-#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
-#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
-#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
-
-#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+#define RCP_CONFIG _MMIO(0xd08)
+
+#define RC6_LOCATION _MMIO(0xd40)
+#define RC6_CTX_IN_DRAM (1 << 0)
+#define RC6_CTX_BASE _MMIO(0xd48)
+#define RC6_CTX_BASE_MASK 0xFFFFFFF0
+
+#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0xd50 + (n) * 4)
+#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0xd70 + (n) * 4)
+#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0xd84)
+#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0xd88)
+
+#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
+#define SF_MCR_SELECTOR _MMIO(0xfd8)
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
+#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
+#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
+#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
+#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
+
+#define IPEIR_I965 _MMIO(0x2064)
+#define IPEHR_I965 _MMIO(0x2068)
-#define GEN8_RTCR _MMIO(0x4260)
-#define GEN8_M1TCR _MMIO(0x4264)
-#define GEN8_M2TCR _MMIO(0x4268)
-#define GEN8_BTCR _MMIO(0x426c)
-#define GEN8_VTCR _MMIO(0x4270)
-
-#define IPEIR_I965 _MMIO(0x2064)
-#define IPEHR_I965 _MMIO(0x2068)
-#define GEN7_SC_INSTDONE _MMIO(0x7100)
-#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
-#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
-#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
-#define GEN7_ROW_INSTDONE _MMIO(0xe164)
-#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
-#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
-#define SF_MCR_SELECTOR _MMIO(0xfd8)
-#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
-#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
-#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
-#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
-#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
-#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
-#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
-#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
-#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
-#define INSTPS _MMIO(0x2070) /* 965+ only */
-#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
-#define ACTHD_I965 _MMIO(0x2074)
-#define HWS_PGA _MMIO(0x2080)
-#define HWS_ADDRESS_MASK 0xfffff000
-#define HWS_START_ADDRESS_SHIFT 4
-#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
-#define PWRCTX_EN (1 << 0)
-#define GEN2_INSTDONE _MMIO(0x2090)
-#define NOPID _MMIO(0x2094)
-#define HWSTAM _MMIO(0x2098)
-
-#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
-#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
-
-#define ERROR_GEN6 _MMIO(0x40a0)
-
-#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
-#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
-#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
-#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
-#define FAULT_VA_HIGH_BITS (0xf << 0)
-#define FAULT_GTT_SEL (1 << 4)
-
-#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
-#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
-#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
-#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
-
-#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+#define INSTPS _MMIO(0x2070) /* 965+ only */
+#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
+#define ACTHD_I965 _MMIO(0x2074)
+#define HWS_PGA _MMIO(0x2080)
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
+
+#define _3D_CHICKEN _MMIO(0x2084)
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+
+#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
+#define PWRCTX_EN (1 << 0)
+
+#define FF_SLICE_CHICKEN _MMIO(0x2088)
+#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
-#define _3D_CHICKEN _MMIO(0x2084)
-#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
-#define _3D_CHICKEN2 _MMIO(0x208c)
-
-#define FF_SLICE_CHICKEN _MMIO(0x2088)
-#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
-
+#define _3D_CHICKEN2 _MMIO(0x208c)
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
-# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
-#define _3D_CHICKEN3 _MMIO(0x2090)
-#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
-#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
-#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
-#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
-#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
-
-#define MI_MODE _MMIO(0x209c)
-# define VS_TIMER_DISPATCH (1 << 6)
-# define MI_FLUSH_ENABLE (1 << 12)
-# define TGL_NESTED_BB_EN (1 << 12)
-# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
-# define MODE_IDLE (1 << 9)
-# define STOP_RING (1 << 8)
-
-#define GEN6_GT_MODE _MMIO(0x20d0)
-#define GEN7_GT_MODE _MMIO(0x7008)
-#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
-#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
-#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
-#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
-#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
-#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
-#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
-#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+#define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+
+#define _3D_CHICKEN3 _MMIO(0x2090)
+#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
+#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
+
+#define GEN2_INSTDONE _MMIO(0x2090)
+#define NOPID _MMIO(0x2094)
+#define HWSTAM _MMIO(0x2098)
+
+#define WAIT_FOR_RC6_EXIT _MMIO(0x20cc)
+/* HSW only */
+#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2
+#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SLECTIVE_READ_ADDRESSING_SHIFT)
+#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
+/* HSW+ */
+#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0)
+#define HSW_RCS_CONTEXT_ENABLE (1 << 7)
+#define HSW_RCS_INHIBIT (1 << 8)
+/* Gen8 */
+#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
+#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
+#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6)
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
+
+#define GEN6_GT_MODE _MMIO(0x20d0)
+#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
+#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
+#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
+#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
+#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
-#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
-#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
-#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20d4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
-#define SCCGCTL94DC _MMIO(0x94dc)
-#define CG3DDISURB REG_BIT(14)
+#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
-#define MLTICTXCTL _MMIO(0xb170)
-#define TDONRENDER REG_BIT(2)
+#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
-#define L3SQCREG1_CCS0 _MMIO(0xb200)
-#define FLUSHALLNONCOH REG_BIT(5)
+#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
+#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20ec)
+#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
/* WaClearTdlStateAckDirtyBits */
-#define GEN8_STATE_ACK _MMIO(0x20F0)
-#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
-#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
-#define GEN9_STATE_ACK_TDL0 (1 << 12)
-#define GEN9_STATE_ACK_TDL1 (1 << 13)
-#define GEN9_STATE_ACK_TDL2 (1 << 14)
-#define GEN9_STATE_ACK_TDL3 (1 << 15)
-#define GEN9_SUBSLICE_TDL_ACK_BITS \
+#define GEN8_STATE_ACK _MMIO(0x20f0)
+#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20f8)
+#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
+#define GEN9_STATE_ACK_TDL0 (1 << 12)
+#define GEN9_STATE_ACK_TDL1 (1 << 13)
+#define GEN9_STATE_ACK_TDL2 (1 << 14)
+#define GEN9_STATE_ACK_TDL3 (1 << 15)
+#define GEN9_SUBSLICE_TDL_ACK_BITS \
(GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
-#define GFX_MODE _MMIO(0x2520)
-
-#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
-#define CM0_IZ_OPT_DISABLE (1 << 6)
-#define CM0_ZR_OPT_DISABLE (1 << 5)
-#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
-#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
-#define CM0_COLOR_EVICT_DISABLE (1 << 3)
-#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
-#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
-#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
-#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
-#define GFX_FLSH_CNTL_EN (1 << 0)
-
-#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1 << 0)
-#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
-#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
+#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
-#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
-#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
-
-#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
-#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
-#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
-
-/* Fuse readout registers for GT */
-#define HSW_PAVP_FUSE1 _MMIO(0x911C)
-#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
-#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
-#define HSW_F1_EU_DIS_10EUS 0
-#define HSW_F1_EU_DIS_8EUS 1
-#define HSW_F1_EU_DIS_6EUS 2
-
-#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
-#define CHV_FGT_DISABLE_SS0 (1 << 10)
-#define CHV_FGT_DISABLE_SS1 (1 << 11)
-#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
-#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
-#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
-#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
-#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
-#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
-#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
-#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
-
-#define GEN8_FUSE2 _MMIO(0x9120)
-#define GEN8_F2_SS_DIS_SHIFT 21
-#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
-#define GEN8_F2_S_ENA_SHIFT 25
-#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
-
-#define GEN9_F2_SS_DIS_SHIFT 20
-#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
-
-#define GEN10_F2_S_ENA_SHIFT 22
-#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
-#define GEN10_F2_SS_DIS_SHIFT 18
-#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
-
-#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
-#define GEN10_L3BANK_PAIR_COUNT 4
-#define GEN10_L3BANK_MASK 0x0F
-/* on Xe_HP the same fuses indicates mslices instead of L3 banks */
-#define GEN12_MAX_MSLICES 4
-#define GEN12_MEML3_EN_MASK 0x0F
-
-#define GEN8_EU_DISABLE0 _MMIO(0x9134)
-#define GEN8_EU_DIS0_S0_MASK 0xffffff
-#define GEN8_EU_DIS0_S1_SHIFT 24
-#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
-
-#define GEN8_EU_DISABLE1 _MMIO(0x9138)
-#define GEN8_EU_DIS1_S1_MASK 0xffff
-#define GEN8_EU_DIS1_S2_SHIFT 16
-#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
-
-#define GEN8_EU_DISABLE2 _MMIO(0x913c)
-#define GEN8_EU_DIS2_S2_MASK 0xff
-
-#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
-
-#define GEN10_EU_DISABLE3 _MMIO(0x9140)
-#define GEN10_EU_DIS_SS_MASK 0xff
-
-#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
-#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
-#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
-
-#define GEN11_EU_DISABLE _MMIO(0x9134)
-#define GEN11_EU_DIS_MASK 0xFF
-
-#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
-#define GEN11_GT_S_ENA_MASK 0xFF
-
-#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
-
-#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913C)
-#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
-
-#define XEHP_EU_ENABLE _MMIO(0x9134)
-#define XEHP_EU_ENA_MASK 0xFF
-
-#define CRSTANDVID _MMIO(0x11100)
-#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
-#define PXVFREQ_PX_MASK 0x7f000000
-#define PXVFREQ_PX_SHIFT 24
-#define VIDFREQ_BASE _MMIO(0x11110)
-#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
-#define VIDFREQ2 _MMIO(0x11114)
-#define VIDFREQ3 _MMIO(0x11118)
-#define VIDFREQ4 _MMIO(0x1111c)
-#define VIDFREQ_P0_MASK 0x1f000000
-#define VIDFREQ_P0_SHIFT 24
-#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
-#define VIDFREQ_P0_CSCLK_SHIFT 20
-#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
-#define VIDFREQ_P0_CRCLK_SHIFT 16
-#define VIDFREQ_P1_MASK 0x00001f00
-#define VIDFREQ_P1_SHIFT 8
-#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
-#define VIDFREQ_P1_CSCLK_SHIFT 4
-#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
-#define INTTOEXT_BASE_ILK _MMIO(0x11300)
-#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
-#define INTTOEXT_MAP3_SHIFT 24
-#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
-#define INTTOEXT_MAP2_SHIFT 16
-#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
-#define INTTOEXT_MAP1_SHIFT 8
-#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
-#define INTTOEXT_MAP0_SHIFT 0
-#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
-#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
-#define MEMCTL_CMD_MASK 0xe000
-#define MEMCTL_CMD_SHIFT 13
-#define MEMCTL_CMD_RCLK_OFF 0
-#define MEMCTL_CMD_RCLK_ON 1
-#define MEMCTL_CMD_CHFREQ 2
-#define MEMCTL_CMD_CHVID 3
-#define MEMCTL_CMD_VMMOFF 4
-#define MEMCTL_CMD_VMMON 5
-#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
- when command complete */
-#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
-#define MEMCTL_FREQ_SHIFT 8
-#define MEMCTL_SFCAVM (1 << 7)
-#define MEMCTL_TGT_VID_MASK 0x007f
-#define MEMIHYST _MMIO(0x1117c)
-#define MEMINTREN _MMIO(0x11180) /* 16 bits */
-#define MEMINT_RSEXIT_EN (1 << 8)
-#define MEMINT_CX_SUPR_EN (1 << 7)
-#define MEMINT_CONT_BUSY_EN (1 << 6)
-#define MEMINT_AVG_BUSY_EN (1 << 5)
-#define MEMINT_EVAL_CHG_EN (1 << 4)
-#define MEMINT_MON_IDLE_EN (1 << 3)
-#define MEMINT_UP_EVAL_EN (1 << 2)
-#define MEMINT_DOWN_EVAL_EN (1 << 1)
-#define MEMINT_SW_CMD_EN (1 << 0)
-#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
-#define MEM_RSEXIT_MASK 0xc000
-#define MEM_RSEXIT_SHIFT 14
-#define MEM_CONT_BUSY_MASK 0x3000
-#define MEM_CONT_BUSY_SHIFT 12
-#define MEM_AVG_BUSY_MASK 0x0c00
-#define MEM_AVG_BUSY_SHIFT 10
-#define MEM_EVAL_CHG_MASK 0x0300
-#define MEM_EVAL_BUSY_SHIFT 8
-#define MEM_MON_IDLE_MASK 0x00c0
-#define MEM_MON_IDLE_SHIFT 6
-#define MEM_UP_EVAL_MASK 0x0030
-#define MEM_UP_EVAL_SHIFT 4
-#define MEM_DOWN_EVAL_MASK 0x000c
-#define MEM_DOWN_EVAL_SHIFT 2
-#define MEM_SW_CMD_MASK 0x0003
-#define MEM_INT_STEER_GFX 0
-#define MEM_INT_STEER_CMR 1
-#define MEM_INT_STEER_SMI 2
-#define MEM_INT_STEER_SCI 3
-#define MEMINTRSTS _MMIO(0x11184)
-#define MEMINT_RSEXIT (1 << 7)
-#define MEMINT_CONT_BUSY (1 << 6)
-#define MEMINT_AVG_BUSY (1 << 5)
-#define MEMINT_EVAL_CHG (1 << 4)
-#define MEMINT_MON_IDLE (1 << 3)
-#define MEMINT_UP_EVAL (1 << 2)
-#define MEMINT_DOWN_EVAL (1 << 1)
-#define MEMINT_SW_CMD (1 << 0)
-#define MEMMODECTL _MMIO(0x11190)
-#define MEMMODE_BOOST_EN (1 << 31)
-#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
-#define MEMMODE_BOOST_FREQ_SHIFT 24
-#define MEMMODE_IDLE_MODE_MASK 0x00030000
-#define MEMMODE_IDLE_MODE_SHIFT 16
-#define MEMMODE_IDLE_MODE_EVAL 0
-#define MEMMODE_IDLE_MODE_CONT 1
-#define MEMMODE_HWIDLE_EN (1 << 15)
-#define MEMMODE_SWMODE_EN (1 << 14)
-#define MEMMODE_RCLK_GATE (1 << 13)
-#define MEMMODE_HW_UPDATE (1 << 12)
-#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
-#define MEMMODE_FSTART_SHIFT 8
-#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
-#define MEMMODE_FMAX_SHIFT 4
-#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
-#define RCBMAXAVG _MMIO(0x1119c)
-#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
-#define SWMEMCMD_RENDER_OFF (0 << 13)
-#define SWMEMCMD_RENDER_ON (1 << 13)
-#define SWMEMCMD_SWFREQ (2 << 13)
-#define SWMEMCMD_TARVID (3 << 13)
-#define SWMEMCMD_VRM_OFF (4 << 13)
-#define SWMEMCMD_VRM_ON (5 << 13)
-#define CMDSTS (1 << 12)
-#define SFCAVM (1 << 11)
-#define SWFREQ_MASK 0x0380 /* P0-7 */
-#define SWFREQ_SHIFT 7
-#define TARVID_MASK 0x001f
-#define MEMSTAT_CTG _MMIO(0x111a0)
-#define RCBMINAVG _MMIO(0x111a0)
-#define RCUPEI _MMIO(0x111b0)
-#define RCDNEI _MMIO(0x111b4)
-#define RSTDBYCTL _MMIO(0x111b8)
-#define RS1EN (1 << 31)
-#define RS2EN (1 << 30)
-#define RS3EN (1 << 29)
-#define D3RS3EN (1 << 28) /* Display D3 imlies RS3 */
-#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
-#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
-#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
-#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
-#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
-#define RSX_STATUS_MASK (7 << 20)
-#define RSX_STATUS_ON (0 << 20)
-#define RSX_STATUS_RC1 (1 << 20)
-#define RSX_STATUS_RC1E (2 << 20)
-#define RSX_STATUS_RS1 (3 << 20)
-#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
-#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
-#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
-#define RSX_STATUS_RSVD2 (7 << 20)
-#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
-#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
-#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
-#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
-#define RS1CONTSAV_MASK (3 << 14)
-#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
-#define RS1CONTSAV_RSVD (1 << 14)
-#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
-#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
-#define NORMSLEXLAT_MASK (3 << 12)
-#define SLOW_RS123 (0 << 12)
-#define SLOW_RS23 (1 << 12)
-#define SLOW_RS3 (2 << 12)
-#define NORMAL_RS123 (3 << 12)
-#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
-#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
-#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
-#define RS_CSTATE_MASK (3 << 4)
-#define RS_CSTATE_C367_RS1 (0 << 4)
-#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
-#define RS_CSTATE_RSVD (2 << 4)
-#define RS_CSTATE_C367_RS2 (3 << 4)
-#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
-#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
-#define VIDCTL _MMIO(0x111c0)
-#define VIDSTS _MMIO(0x111c8)
-#define VIDSTART _MMIO(0x111cc) /* 8 bits */
-#define MEMSTAT_ILK _MMIO(0x111f8)
-#define MEMSTAT_VID_MASK 0x7f00
-#define MEMSTAT_VID_SHIFT 8
-#define MEMSTAT_PSTATE_MASK 0x00f8
-#define MEMSTAT_PSTATE_SHIFT 3
-#define MEMSTAT_MON_ACTV (1 << 2)
-#define MEMSTAT_SRC_CTL_MASK 0x0003
-#define MEMSTAT_SRC_CTL_CORE 0
-#define MEMSTAT_SRC_CTL_TRB 1
-#define MEMSTAT_SRC_CTL_THM 2
-#define MEMSTAT_SRC_CTL_STDBY 3
-#define RCPREVBSYTUPAVG _MMIO(0x113b8)
-#define RCPREVBSYTDNAVG _MMIO(0x113bc)
-#define PMMISC _MMIO(0x11214)
-#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
-#define SDEW _MMIO(0x1124c)
-#define CSIEW0 _MMIO(0x11250)
-#define CSIEW1 _MMIO(0x11254)
-#define CSIEW2 _MMIO(0x11258)
-#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
-#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
-#define MCHAFE _MMIO(0x112c0)
-#define CSIEC _MMIO(0x112e0)
-#define DMIEC _MMIO(0x112e4)
-#define DDREC _MMIO(0x112e8)
-#define PEG0EC _MMIO(0x112ec)
-#define PEG1EC _MMIO(0x112f0)
-#define GFXEC _MMIO(0x112f4)
-#define RPPREVBSYTUPAVG _MMIO(0x113b8)
-#define RPPREVBSYTDNAVG _MMIO(0x113bc)
-#define ECR _MMIO(0x11600)
-#define ECR_GPFE (1 << 31)
-#define ECR_IMONE (1 << 30)
-#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
-#define OGW0 _MMIO(0x11608)
-#define OGW1 _MMIO(0x1160c)
-#define EG0 _MMIO(0x11610)
-#define EG1 _MMIO(0x11614)
-#define EG2 _MMIO(0x11618)
-#define EG3 _MMIO(0x1161c)
-#define EG4 _MMIO(0x11620)
-#define EG5 _MMIO(0x11624)
-#define EG6 _MMIO(0x11628)
-#define EG7 _MMIO(0x1162c)
-#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
-#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
-#define LCFUSE02 _MMIO(0x116c0)
-#define LCFUSE_HIV_MASK 0x000000ff
-
-#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
-#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
/*
* Logical Context regs
@@ -798,427 +185,538 @@
* - GT1 size just indicates how much of render context
* doesn't need saving on GT1
*/
-#define CXT_SIZE _MMIO(0x21a0)
-#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
-#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
-#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
-#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
-#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
-#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
- GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
- GEN6_CXT_PIPELINE_SIZE(cxt_reg))
-#define GEN7_CXT_SIZE _MMIO(0x21a8)
-#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
-#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
-#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
-#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
-#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
-#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
-#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
- GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-
-enum {
- INTEL_ADVANCED_CONTEXT = 0,
- INTEL_LEGACY_32B_CONTEXT,
- INTEL_ADVANCED_AD_CONTEXT,
- INTEL_LEGACY_64B_CONTEXT
-};
-
-enum {
- FAULT_AND_HANG = 0,
- FAULT_AND_HALT, /* Debug only */
- FAULT_AND_STREAM,
- FAULT_AND_CONTINUE /* Unsupported */
-};
-
-#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
-#define GEN8_CTX_VALID (1 << 0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
-#define GEN8_CTX_FORCE_RESTORE (1 << 2)
-#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
-#define GEN8_CTX_PRIVILEGE (1 << 8)
-#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-
-#define GEN8_CTX_ID_SHIFT 32
-#define GEN8_CTX_ID_WIDTH 21
-#define GEN11_SW_CTX_ID_SHIFT 37
-#define GEN11_SW_CTX_ID_WIDTH 11
-#define GEN11_ENGINE_CLASS_SHIFT 61
-#define GEN11_ENGINE_CLASS_WIDTH 3
-#define GEN11_ENGINE_INSTANCE_SHIFT 48
-#define GEN11_ENGINE_INSTANCE_WIDTH 6
-
-#define XEHP_SW_CTX_ID_SHIFT 39
-#define XEHP_SW_CTX_ID_WIDTH 16
-#define XEHP_SW_COUNTER_SHIFT 58
-#define XEHP_SW_COUNTER_WIDTH 6
-
-#define UNSLCGCTL9440 _MMIO(0x9440)
-#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
-#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
-#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
-#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
-
-#define UNSLCGCTL9444 _MMIO(0x9444)
-#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
-#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
-#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
-#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
-#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
-#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
-#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
-#define LTCDD_CLKGATE_DIS REG_BIT(10)
-
-#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
-#define SARBUNIT_CLKGATE_DIS (1 << 5)
-#define RCCUNIT_CLKGATE_DIS (1 << 7)
-#define MSCUNIT_CLKGATE_DIS (1 << 10)
-#define NODEDSS_CLKGATE_DIS REG_BIT(12)
-#define L3_CLKGATE_DIS REG_BIT(16)
-#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
-
-#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
-#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
-#define GWUNIT_CLKGATE_DIS REG_BIT(16)
-
-#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
-#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
-
-#define SSMCGCTL9530 _MMIO(0x9530)
-#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
-
-#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
-#define VFUNIT_CLKGATE_DIS REG_BIT(20)
-#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
-#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
-#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
-#define HSUNIT_CLKGATE_DIS REG_BIT(8)
-#define VSUNIT_CLKGATE_DIS REG_BIT(3)
-
-#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
-#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
-#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
-
-#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
-#define CGPSF_CLKGATE_DIS (1 << 3)
-
-#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
-#define GEN11_CSME (31)
-#define GEN11_GUNIT (28)
-#define GEN11_GUC (25)
-#define GEN11_WDPERF (20)
-#define GEN11_KCR (19)
-#define GEN11_GTPM (16)
-#define GEN11_BCS (15)
-#define GEN11_RCS0 (0)
-
-#define GEN11_GT_INTR_DW1 _MMIO(0x19001c)
-#define GEN11_VECS(x) (31 - (x))
-#define GEN11_VCS(x) (x)
-
-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
-
-#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
-#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
-#define GEN11_INTR_DATA_VALID (1 << 31)
-#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
-#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
-#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
-/* irq instances for OTHER_CLASS */
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GTPM_INSTANCE 1
-#define OTHER_KCR_INSTANCE 4
-
-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
-
-#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
-#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
-
-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
-
-#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
-#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
-#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
-#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
-#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
-#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
-
-#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
-#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
-#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
-#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
-#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0)
-#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4)
-#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
-#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4)
-#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
-#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
-#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
-#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
-
-#define ENGINE1_MASK REG_GENMASK(31, 16)
-#define ENGINE0_MASK REG_GENMASK(15, 0)
-
-#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
-#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
-
-#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
-#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
-#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
+#define CXT_SIZE _MMIO(0x21a0)
+#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE _MMIO(0x21a8)
+#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
+#define HSW_MI_PREDICATE_RESULT_2 _MMIO(0x2214)
+
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
+#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
+#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
+
+#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
+#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
+
+#define HS_INVOCATION_COUNT _MMIO(0x2300)
+#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
+#define DS_INVOCATION_COUNT _MMIO(0x2308)
+#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
+#define IA_VERTICES_COUNT _MMIO(0x2310)
+#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
+#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
+#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
+#define VS_INVOCATION_COUNT _MMIO(0x2320)
+#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
+#define GS_INVOCATION_COUNT _MMIO(0x2328)
+#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
+#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
+#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
+#define CL_INVOCATION_COUNT _MMIO(0x2338)
+#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
+#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
+#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
+#define PS_INVOCATION_COUNT _MMIO(0x2348)
+#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
+#define PS_DEPTH_COUNT _MMIO(0x2350)
+#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
+#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
+#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
+#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
+#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
+#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243c)
+#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
+#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
+#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
+#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
+
+#define GFX_MODE _MMIO(0x2520)
+
+#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
+#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
+#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
+#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+
+#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+
+#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080)
+
+#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
+#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
-#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
-#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
-#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
-#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+#define GAM_ECOCHK _MMIO(0x4090)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+
+#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define _RING_FAULT_REG_RCS 0x4094
+#define _RING_FAULT_REG_VCS 0x4194
+#define _RING_FAULT_REG_BCS 0x4294
+#define _RING_FAULT_REG_VECS 0x4394
+#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
+ _RING_FAULT_REG_RCS, \
+ _RING_FAULT_REG_VCS, \
+ _RING_FAULT_REG_VECS, \
+ _RING_FAULT_REG_BCS))
+
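/*
 * Illustrative note (editor's sketch, not part of this patch):
 * RING_FAULT_REG() indexes the list above with _PICK() by engine class, so
 * the list order (RCS, VCS, VECS, BCS) is assumed to match the driver's
 * class numbering (render, video decode, video enhance, copy); a
 * hypothetical engine of class 1 would resolve to _MMIO(0x4194).
 */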
+#define ERROR_GEN6 _MMIO(0x40a0)
+
+#define DONE_REG _MMIO(0x40b0)
+#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
+#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
+#define BSD_HWS_PGA_GEN7 _MMIO(0x4180)
+#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
+#define GEN12_VD0_AUX_NV _MMIO(0x4218)
+#define GEN12_VD1_AUX_NV _MMIO(0x4228)
+
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
+#define GEN12_VD2_AUX_NV _MMIO(0x4298)
+#define GEN12_VD3_AUX_NV _MMIO(0x42a8)
+#define GEN12_VE0_AUX_NV _MMIO(0x4238)
+
+#define BLT_HWS_PGA_GEN7 _MMIO(0x4280)
+
+#define GEN12_VE1_AUX_NV _MMIO(0x42b8)
+#define AUX_INV REG_BIT(0)
+#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380)
+
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
+#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+
+#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
+
+#define GAMTARBMODE _MMIO(0x4a08)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
+
+#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
-#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC)
-#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
-#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
-#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
-#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
-#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
-#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
-#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
+#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
-/* GEN7 chicken */
-#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
- #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10)
- #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+#define GEN11_GACB_PERF_CTRL _MMIO(0x4b80)
+#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
+#define GEN11_HASH_CTRL_BIT0 (1 << 0)
+#define GEN11_HASH_CTRL_BIT4 (1 << 12)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
- #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
- #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
- #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
- #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+/* gamt regs */
+#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
-#define GEN8_L3CNTLREG _MMIO(0x7034)
- #define GEN8_ERRDETBCTRL (1 << 9)
+#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
+#define MMCD_PCLA (1 << 31)
+#define MMCD_HOTSPOT_EN (1 << 27)
-#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
-#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
-#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
-#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
-#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+/* There are the 4 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
+#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
-#define HIZ_CHICKEN _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
-# define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
+#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
-#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
-#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
+#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
+#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
-#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
-#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+#define VFLSKPD _MMIO(0x62a8)
+#define DIS_OVER_FETCH_CACHE REG_BIT(1)
+#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
-#define GEN7_SARCHKMD _MMIO(0xB000)
-#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
-#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
+#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
-#define GEN7_L3SQCREG1 _MMIO(0xB010)
-#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
-#define GEN8_L3SQCREG1 _MMIO(0xB100)
-/*
- * Note that on CHV the following has an off-by-one error wrt. to BSpec.
- * Using the formula in BSpec leads to a hang, while the formula here works
- * fine and matches the formulas for all other platforms. A BSpec change
- * request has been filed to clarify this.
- */
-#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
-#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
-#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
+#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
+#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
-#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
-#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
-#define GEN7_L3AGDIS (1 << 19)
-#define GEN7_L3CNTLREG2 _MMIO(0xB020)
-#define GEN7_L3CNTLREG3 _MMIO(0xB024)
+#define GEN7_GT_MODE _MMIO(0x7008)
+#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
+#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
-#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030)
-#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
-#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xB114)
-#define GEN11_I2M_WRITE_DISABLE (1 << 28)
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
+#define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10)
+#define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
-#define GEN7_L3SQCREG4 _MMIO(0xb034)
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+#define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+#define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
-#define GEN11_SCRATCH2 _MMIO(0xb140)
-#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+#define HIZ_CHICKEN _MMIO(0x7018)
+#define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
+#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
+#define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
-#define GEN8_L3SQCREG4 _MMIO(0xb118)
-#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
-#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
-#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
-#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+#define GEN8_L3CNTLREG _MMIO(0x7034)
+#define GEN8_ERRDETBCTRL (1 << 9)
-#define GEN11_L3SQCREG5 _MMIO(0xb158)
-#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
-
-#define XEHP_L3SCQREG7 _MMIO(0xb188)
-#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
-#define ICL_HDC_MODE _MMIO(0xE5F4)
-#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
-#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
-#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
-#define HDC_FORCE_NON_COHERENT (1 << 4)
-#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
-
-#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0)
-#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
-
-#define SARB_CHICKEN1 _MMIO(0xe90c)
-#define COMP_CKN_IN REG_GENMASK(30, 29)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
+#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
+#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
+#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+
/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
-#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
-#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+
+#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+
+#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
+#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
+ ((slice) % 3) * 0x4)
+#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
+#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
+
+#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
+#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
+#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
+#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
+#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
+#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
+#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
+#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
+#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
+#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
+#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
+#define GEN12_SQCM _MMIO(0x8724)
+#define EN_32B_ACCESS REG_BIT(30)
+
+#define HSW_IDICR _MMIO(0x9008)
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+
+#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
+
+#define VLV_G3DCTL _MMIO(0x9024)
+#define VLV_GSCKGCTL _MMIO(0x9028)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
-#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
-#define HSW_SCRATCH1 _MMIO(0xb038)
-#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
-
-#define BDW_SCRATCH1 _MMIO(0xb11c)
-#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
-
-#define VFLSKPD _MMIO(0x62a8)
-#define DIS_OVER_FETCH_CACHE REG_BIT(1)
-#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
-
-#define FF_MODE2 _MMIO(0x6604)
-#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
-#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
-#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
-#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
-
-#define RC6_LOCATION _MMIO(0xD40)
-#define RC6_CTX_IN_DRAM (1 << 0)
-#define RC6_CTX_BASE _MMIO(0xD48)
-#define RC6_CTX_BASE_MASK 0xFFFFFFF0
-#define FORCEWAKE _MMIO(0xA18C)
-#define FORCEWAKE_VLV _MMIO(0x1300b0)
-#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
-#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
-#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
-#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
-#define FORCEWAKE_ACK _MMIO(0x130090)
-#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
-#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
-#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
-#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN REG_BIT(30)
-#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
-#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
-#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
-#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
-#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
-#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
-#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
-#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
-#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
-#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
-#define FORCEWAKE_GT_GEN9 _MMIO(0xa188)
-#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
-#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4)
-#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4)
-#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
-#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044)
-#define FORCEWAKE_KERNEL BIT(0)
-#define FORCEWAKE_USER BIT(1)
-#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
-#define FORCEWAKE_MT_ACK _MMIO(0x130040)
-#define ECOBUS _MMIO(0xa180)
-#define FORCEWAKE_MT_ENABLE (1 << 5)
-#define VLV_SPAREG2H _MMIO(0xA194)
-#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
-#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
-#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+#define GEN6_MBCTL _MMIO(0x907c)
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
-#define GTFIFODBG _MMIO(0x120000)
-#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
-#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
-#define GT_FIFO_SBDROPERR (1 << 6)
-#define GT_FIFO_BLOBDROPERR (1 << 5)
-#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
-#define GT_FIFO_DROPERR (1 << 3)
-#define GT_FIFO_OVFERR (1 << 2)
-#define GT_FIFO_IAWRERR (1 << 1)
-#define GT_FIFO_IARDERR (1 << 0)
-
-#define GTFIFOCTL _MMIO(0x120008)
-#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
-#define GT_FIFO_NUM_RESERVED_ENTRIES 20
-#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
-#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
-
-#define HSW_IDICR _MMIO(0x9008)
-#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+/* Fuse readout registers for GT */
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+/* on Xe_HP the same fuses indicate mslices instead of L3 banks */
+#define GEN12_MAX_MSLICES 4
+#define GEN12_MEML3_EN_MASK 0x0F
+
+#define HSW_PAVP_FUSE1 _MMIO(0x911c)
+#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
+#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
+#define HSW_F1_EU_DIS_10EUS 0
+#define HSW_F1_EU_DIS_8EUS 1
+#define HSW_F1_EU_DIS_6EUS 2
+
+#define GEN8_FUSE2 _MMIO(0x9120)
+#define GEN8_F2_SS_DIS_SHIFT 21
+#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
+#define GEN8_F2_S_ENA_SHIFT 25
+#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
+#define GEN9_F2_SS_DIS_SHIFT 20
+#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+#define GEN10_F2_S_ENA_SHIFT 22
+#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
+#define GEN10_F2_SS_DIS_SHIFT 18
+#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0 _MMIO(0x9134)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
+#define GEN11_EU_DISABLE _MMIO(0x9134)
+#define GEN8_EU_DIS0_S0_MASK 0xffffff
+#define GEN8_EU_DIS0_S1_SHIFT 24
+#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
+#define GEN11_EU_DIS_MASK 0xFF
+#define XEHP_EU_ENABLE _MMIO(0x9134)
+#define XEHP_EU_ENA_MASK 0xFF
+
+#define GEN8_EU_DISABLE1 _MMIO(0x9138)
+#define GEN8_EU_DIS1_S1_MASK 0xffff
+#define GEN8_EU_DIS1_S2_SHIFT 16
+#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
+
+#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
+#define GEN11_GT_S_ENA_MASK 0xFF
+
+#define GEN8_EU_DISABLE2 _MMIO(0x913c)
+#define GEN8_EU_DIS2_S2_MASK 0xff
+
+#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913c)
+#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913c)
+
+#define GEN10_EU_DISABLE3 _MMIO(0x9140)
+#define GEN10_EU_DIS_SS_MASK 0xff
+#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
+#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
+#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
+#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
+
+#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
#define GEN6_UCGCTL1 _MMIO(0x9400)
-# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
-# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
-# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
+#define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
+#define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+#define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 _MMIO(0x9404)
-# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
-# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
-# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
-# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
-# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
-# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+#define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+#define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define GEN6_UCGCTL3 _MMIO(0x9408)
-# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
+#define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
-#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
-#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
+
+#define GEN6_GDRST _MMIO(0x941c)
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+#define GEN6_GRDOM_VECS (1 << 4)
+#define GEN9_GRDOM_GUC (1 << 5)
+#define GEN8_GRDOM_MEDIA2 (1 << 7)
+/* GEN11 changed all bit defs except for FULL & RENDER */
+#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
+#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
+#define GEN11_GRDOM_BLT (1 << 2)
+#define GEN11_GRDOM_GUC (1 << 3)
+#define GEN11_GRDOM_MEDIA (1 << 5)
+#define GEN11_GRDOM_MEDIA2 (1 << 6)
+#define GEN11_GRDOM_MEDIA3 (1 << 7)
+#define GEN11_GRDOM_MEDIA4 (1 << 8)
+#define GEN11_GRDOM_MEDIA5 (1 << 9)
+#define GEN11_GRDOM_MEDIA6 (1 << 10)
+#define GEN11_GRDOM_MEDIA7 (1 << 11)
+#define GEN11_GRDOM_MEDIA8 (1 << 12)
+#define GEN11_GRDOM_VECS (1 << 13)
+#define GEN11_GRDOM_VECS2 (1 << 14)
+#define GEN11_GRDOM_VECS3 (1 << 15)
+#define GEN11_GRDOM_VECS4 (1 << 16)
+#define GEN11_GRDOM_SFC0 (1 << 17)
+#define GEN11_GRDOM_SFC1 (1 << 18)
+#define GEN11_GRDOM_SFC2 (1 << 19)
+#define GEN11_GRDOM_SFC3 (1 << 20)
+#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
+
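/*
 * Illustrative note (editor's sketch, not part of this patch): the SFC reset
 * helpers above encode how engines map to shared SFC units - each VCS pair
 * shares one SFC (hence instance >> 1) while VECS instances map one-to-one:
 *
 *	GEN11_VCS_SFC_RESET_BIT(2)  == GEN11_GRDOM_SFC0 << 1 == GEN11_GRDOM_SFC1
 *	GEN11_VECS_SFC_RESET_BIT(1) == GEN11_GRDOM_SFC0 << 1 == GEN11_GRDOM_SFC1
 */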
#define GEN6_RSTCTL _MMIO(0x9420)
+#define GEN7_MISCCPCTL _MMIO(0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
+#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
+
#define GEN8_UCGCTL6 _MMIO(0x9430)
#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
-#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
#define UNSLCGCTL9430 _MMIO(0x9430)
#define MSQDUNIT_CLKGATE_DIS REG_BIT(3)
-#define GEN6_GFXPAUSE _MMIO(0xA000)
-#define GEN6_RPNSWREQ _MMIO(0xA008)
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
+#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
+#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLCGCTL9440 _MMIO(0x9440)
+#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
+#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
+#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
+#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
+
+#define UNSLCGCTL9444 _MMIO(0x9444)
+#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
+#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
+#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
+#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
+#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
+#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
+#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
+#define LTCDD_CLKGATE_DIS REG_BIT(10)
+
+#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
+#define SARBUNIT_CLKGATE_DIS (1 << 5)
+#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define NODEDSS_CLKGATE_DIS REG_BIT(12)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
+
+#define SCCGCTL94DC _MMIO(0x94dc)
+#define CG3DDISURB REG_BIT(14)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
+#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
+#define GWUNIT_CLKGATE_DIS REG_BIT(16)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
+#define SSMCGCTL9530 _MMIO(0x9530)
+#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
+
+#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550)
+#define DFR_DISABLE (1 << 9)
+
+#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
+#define CGPSF_CLKGATE_DIS (1 << 3)
+
+#define MICRO_BP0_0 _MMIO(0x9800)
+#define MICRO_BP0_2 _MMIO(0x9804)
+#define MICRO_BP0_1 _MMIO(0x9808)
+#define MICRO_BP1_0 _MMIO(0x980c)
+#define MICRO_BP1_2 _MMIO(0x9810)
+#define MICRO_BP1_1 _MMIO(0x9814)
+#define MICRO_BP2_0 _MMIO(0x9818)
+#define MICRO_BP2_2 _MMIO(0x981c)
+#define MICRO_BP2_1 _MMIO(0x9820)
+#define MICRO_BP3_0 _MMIO(0x9824)
+#define MICRO_BP3_2 _MMIO(0x9828)
+#define MICRO_BP3_1 _MMIO(0x982c)
+#define MICRO_BP_TRIGGER _MMIO(0x9830)
+#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
+#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
+#define MICRO_BP_FIRED_ARMED _MMIO(0x983c)
+
+#define GEN6_GFXPAUSE _MMIO(0xa000)
+#define GEN6_RPNSWREQ _MMIO(0xa008)
#define GEN6_TURBO_DISABLE (1 << 31)
#define GEN6_FREQUENCY(x) ((x) << 25)
#define HSW_FREQUENCY(x) ((x) << 24)
@@ -1228,8 +726,7 @@ enum {
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
-#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
-#define GEN6_RC_CONTROL _MMIO(0xA090)
+#define GEN6_RC_VIDEO_FREQ _MMIO(0xa00c)
#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
@@ -1239,16 +736,16 @@ enum {
#define GEN7_RC_CTL_TO_MODE (1 << 28)
#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
-#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
-#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
-#define GEN6_RPSTAT1 _MMIO(0xA01C)
+#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xa010)
+#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xa014)
+#define GEN6_RPSTAT1 _MMIO(0xa01c)
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
-#define GEN6_RP_CONTROL _MMIO(0xA024)
+#define GEN6_RP_CONTROL _MMIO(0xa024)
#define GEN6_RP_MEDIA_TURBO (1 << 11)
#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
@@ -1265,193 +762,294 @@ enum {
#define GEN6_RPSWCTL_SHIFT 9
#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT)
#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT)
-#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
-#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
-#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
+#define GEN6_RP_UP_THRESHOLD _MMIO(0xa02c)
+#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xa030)
+#define GEN6_RP_CUR_UP_EI _MMIO(0xa050)
#define GEN6_RP_EI_MASK 0xffffff
#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK
-#define GEN6_RP_CUR_UP _MMIO(0xA054)
+#define GEN6_RP_CUR_UP _MMIO(0xa054)
#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK
-#define GEN6_RP_PREV_UP _MMIO(0xA058)
-#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C)
+#define GEN6_RP_PREV_UP _MMIO(0xa058)
+#define GEN6_RP_CUR_DOWN_EI _MMIO(0xa05c)
#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK
-#define GEN6_RP_CUR_DOWN _MMIO(0xA060)
-#define GEN6_RP_PREV_DOWN _MMIO(0xA064)
-#define GEN6_RP_UP_EI _MMIO(0xA068)
-#define GEN6_RP_DOWN_EI _MMIO(0xA06C)
-#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xA070)
-#define GEN6_RPDEUHWTC _MMIO(0xA080)
-#define GEN6_RPDEUC _MMIO(0xA084)
-#define GEN6_RPDEUCSW _MMIO(0xA088)
-#define GEN6_RC_STATE _MMIO(0xA094)
+#define GEN6_RP_CUR_DOWN _MMIO(0xa060)
+#define GEN6_RP_PREV_DOWN _MMIO(0xa064)
+#define GEN6_RP_UP_EI _MMIO(0xa068)
+#define GEN6_RP_DOWN_EI _MMIO(0xa06c)
+#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xa070)
+#define GEN6_RPDEUHWTC _MMIO(0xa080)
+#define GEN6_RPDEUC _MMIO(0xa084)
+#define GEN6_RPDEUCSW _MMIO(0xa088)
+#define GEN6_RC_CONTROL _MMIO(0xa090)
+#define GEN6_RC_STATE _MMIO(0xa094)
#define RC_SW_TARGET_STATE_SHIFT 16
#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
-#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
-#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
-#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
-#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xA0A0)
-#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
-#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
-#define GEN6_RC_SLEEP _MMIO(0xA0B0)
-#define GEN6_RCUBMABDTMR _MMIO(0xA0B0)
-#define GEN6_RC1e_THRESHOLD _MMIO(0xA0B4)
-#define GEN6_RC6_THRESHOLD _MMIO(0xA0B8)
-#define GEN6_RC6p_THRESHOLD _MMIO(0xA0BC)
-#define VLV_RCEDATA _MMIO(0xA0BC)
-#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
-#define GEN6_PMINTRMSK _MMIO(0xA168)
+#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xa098)
+#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xa09c)
+#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xa0a0)
+#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xa0a0)
+#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xa0a8)
+#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xa0ac)
+#define GEN6_RC_SLEEP _MMIO(0xa0b0)
+#define GEN6_RCUBMABDTMR _MMIO(0xa0b0)
+#define GEN6_RC1e_THRESHOLD _MMIO(0xa0b4)
+#define GEN6_RC6_THRESHOLD _MMIO(0xa0b8)
+#define GEN6_RC6p_THRESHOLD _MMIO(0xa0bc)
+#define VLV_RCEDATA _MMIO(0xa0bc)
+#define GEN6_RC6pp_THRESHOLD _MMIO(0xa0c0)
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xa0c4)
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xa0c8)
+
+#define GEN6_PMINTRMSK _MMIO(0xa168)
#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31)
#define ARAT_EXPIRED_INTRMSK (1 << 9)
-#define GEN8_MISC_CTRL0 _MMIO(0xA180)
-#define VLV_PWRDWNUPCTL _MMIO(0xA294)
-#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
-#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
-#define GEN9_PG_ENABLE _MMIO(0xA210)
+
+#define GEN8_MISC_CTRL0 _MMIO(0xa180)
+
+#define ECOBUS _MMIO(0xa180)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
+
+#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
+#define FORCEWAKE_GT_GEN9 _MMIO(0xa188)
+#define FORCEWAKE _MMIO(0xa18c)
+
+#define VLV_SPAREG2H _MMIO(0xa194)
+
+#define GEN9_PG_ENABLE _MMIO(0xa210)
#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n))
#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n))
-#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
-#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
-#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
-#define GEN6_PMISR _MMIO(0x44020)
-#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
-#define GEN6_PMIIR _MMIO(0x44028)
-#define GEN6_PMIER _MMIO(0x4402C)
-#define GEN6_PM_MBOX_EVENT (1 << 25)
-#define GEN6_PM_THERMAL_EVENT (1 << 24)
+#define GEN8_PUSHBUS_CONTROL _MMIO(0xa248)
+#define GEN8_PUSHBUS_ENABLE _MMIO(0xa250)
+#define GEN8_PUSHBUS_SHIFT _MMIO(0xa25c)
-/*
- * For Gen11 these are in the upper word of the GPM_WGBOXPERF
- * registers. Shifting is handled on accessing the imr and ier.
- */
-#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
-#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
-#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
-#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
-#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
-#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \
- GEN6_PM_RP_UP_THRESHOLD | \
- GEN6_PM_RP_DOWN_EI_EXPIRED | \
- GEN6_PM_RP_DOWN_THRESHOLD | \
- GEN6_PM_RP_DOWN_TIMEOUT)
+/* GPM unit config (Gen9+) */
+#define CTC_MODE _MMIO(0xa26c)
+#define CTC_SOURCE_PARAMETER_MASK 1
+#define CTC_SOURCE_CRYSTAL_CLOCK 0
+#define CTC_SOURCE_DIVIDE_LOGIC 1
+#define CTC_SHIFT_PARAMETER_SHIFT 1
+#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
-#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4)
-#define GEN7_GT_SCRATCH_REG_NUM 8
+#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
+#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
-#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
-#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
+#define VLV_PWRDWNUPCTL _MMIO(0xa294)
-#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
-#define VLV_COUNTER_CONTROL _MMIO(0x138104)
-#define VLV_COUNT_RANGE_HIGH (1 << 15)
-#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
-#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
-#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
-#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
-#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
-#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
-#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
+#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xa2a0)
+#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
+#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
-#define GEN6_GT_GFX_RC6p _MMIO(0x13810C)
-#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
-#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
-#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
-
-#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
-#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
-#define GEN6_RCn_MASK 7
-#define GEN6_RC0 0
-#define GEN6_RC3 2
-#define GEN6_RC6 3
-#define GEN6_RC7 4
-
-#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
-#define GEN8_LSLICESTAT_MASK 0x7
-
-#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
-#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
-#define CHV_SS_PG_ENABLE (1 << 1)
-#define CHV_EU08_PG_ENABLE (1 << 9)
-#define CHV_EU19_PG_ENABLE (1 << 17)
-#define CHV_EU210_PG_ENABLE (1 << 25)
-
-#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
-#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
-#define CHV_EU311_PG_ENABLE (1 << 1)
-
-#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
-#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
- ((slice) % 3) * 0x4)
-#define GEN9_PGCTL_SLICE_ACK (1 << 0)
-#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
-#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
-
-#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
-#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
- ((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
-#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
- ((slice) % 3) * 0x8)
-#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
-#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
-#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
-#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
-#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
-#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
-#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
-#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+#define MISC_STATUS0 _MMIO(0xa500)
+#define MISC_STATUS1 _MMIO(0xa504)
-#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
-#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
-#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
-#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
+#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
+#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
+
+#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
+#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
+#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
+#define CHV_SS_PG_ENABLE (1 << 1)
+#define CHV_EU08_PG_ENABLE (1 << 9)
+#define CHV_EU19_PG_ENABLE (1 << 17)
+#define CHV_EU210_PG_ENABLE (1 << 25)
+#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
+#define CHV_EU311_PG_ENABLE (1 << 1)
-#define GEN8_GARBCNTL _MMIO(0xB004)
+#define GEN7_SARCHKMD _MMIO(0xb000)
+#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+
+#define GEN8_GARBCNTL _MMIO(0xb004)
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22)
#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0)
#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0)
-#define GEN11_GLBLINVL _MMIO(0xB404)
-#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
-#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
+#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
+#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
-#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550)
-#define DFR_DISABLE (1 << 9)
+#define GEN7_L3SQCREG1 _MMIO(0xb010)
+#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
-#define GEN11_GACB_PERF_CTRL _MMIO(0x4B80)
-#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
-#define GEN11_HASH_CTRL_BIT0 (1 << 0)
-#define GEN11_HASH_CTRL_BIT4 (1 << 12)
+#define GEN7_L3CNTLREG1 _MMIO(0xb01c)
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
+#define GEN7_L3AGDIS (1 << 19)
+#define GEN7_L3CNTLREG2 _MMIO(0xb020)
+
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
+#define GEN9_LNCFCMOCS_REG_COUNT 32
+
+#define GEN7_L3CNTLREG3 _MMIO(0xb024)
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xb030)
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+#define GEN7_L3SQCREG4 _MMIO(0xb034)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+
+#define HSW_SCRATCH1 _MMIO(0xb038)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
+
+#define GEN7_L3LOG(slice, i) _MMIO(0xb070 + (slice) * 0x200 + (i) * 4)
+#define GEN7_L3LOG_SIZE 0x80
-#define GEN11_LSN_UNSLCVC _MMIO(0xB43C)
+#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
+#define PMFLUSHDONE_LNICRSDROP (1 << 20)
+#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
+#define PMFLUSHDONE_LNEBLK (1 << 22)
+
+#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
+#define XEHP_LNESPARE REG_BIT(19)
+
+#define GEN8_L3SQCREG1 _MMIO(0xb100)
+/*
+ * Note that on CHV the following has an off-by-one error wrt. to BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
+
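/*
 * Worked example (editor's addition, not part of this patch): the credit
 * helpers above drop the low bit of the requested count and place it in the
 * corresponding field, so programming e.g. 62 general and 2 high priority
 * credits (hypothetical values) would be:
 *
 *	L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2)
 *	  == (31 << 19) | (1 << 14), within L3_PRIO_CREDITS_MASK
 */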
+#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xb114)
+#define GEN11_I2M_WRITE_DISABLE (1 << 28)
+
+#define GEN8_L3SQCREG4 _MMIO(0xb118)
+#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
+#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
+#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
+#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+
+#define GEN9_SCRATCH1 _MMIO(0xb11c)
+#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
+
+#define BDW_SCRATCH1 _MMIO(0xb11c)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+#define GEN11_SCRATCH2 _MMIO(0xb140)
+#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+
+#define GEN11_L3SQCREG5 _MMIO(0xb158)
+#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
+
+#define MLTICTXCTL _MMIO(0xb170)
+#define TDONRENDER REG_BIT(2)
+
+#define XEHP_L3SCQREG7 _MMIO(0xb188)
+#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+
+#define L3SQCREG1_CCS0 _MMIO(0xb200)
+#define FLUSHALLNONCOH REG_BIT(5)
+
+#define GEN11_GLBLINVL _MMIO(0xb404)
+#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
+#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
+
+#define GEN11_LSN_UNSLCVC _MMIO(0xb43c)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
-#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
-#define ENABLE_SMALLPL REG_BIT(15)
-#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+#define __GEN9_RCS0_MOCS0 0xc800
+#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0 0xc900
+#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0 0xca00
+#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0 0xcb00
+#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0 0xcc00
+#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+
+#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
+#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
+#define FAULT_VA_HIGH_BITS (0xf << 0)
+#define FAULT_GTT_SEL (1 << 4)
+
+#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
+#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
+#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
+#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
+#define RING_FAULT_VALID (1 << 0)
+
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+
+#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
+#define RENDER_MOD_CTRL _MMIO(0xcf2c)
+#define COMP_MOD_CTRL _MMIO(0xcf30)
+#define VDBX_MOD_CTRL _MMIO(0xcf34)
+#define VEBX_MOD_CTRL _MMIO(0xcf38)
+#define FORCE_MISS_FTLB REG_BIT(3)
+
+#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
+#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
+#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
+
+#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
-#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
-#define GEN7_L3LOG_SIZE 0x80
+#define GEN12_GAM_DONE _MMIO(0xcf68)
-#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
-#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
+#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
-#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
-#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
-#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
+#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
+#define GEN7_ROW_INSTDONE _MMIO(0xe164)
+
+#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
+#define GEN8_ST_PO_DISABLE (1 << 13)
+
+#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
+#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
+
+#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
+#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
+
+#define GEN10_SAMPLER_MODE _MMIO(0xe18c)
+#define ENABLE_SMALLPL REG_BIT(15)
+#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+
+#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
+#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
+#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
+
+#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
+
+#define EU_PERF_CNTL0 _MMIO(0xe458)
+#define EU_PERF_CNTL4 _MMIO(0xe45c)
+
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+
+#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
+#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
-#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
+#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
#define FLOW_CONTROL_ENABLE REG_BIT(15)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
@@ -1466,6 +1064,20 @@ enum {
#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define RT_CTRL _MMIO(0xe530)
+#define DIS_NULL_QUERY REG_BIT(10)
+
+#define EU_PERF_CNTL1 _MMIO(0xe558)
+#define EU_PERF_CNTL5 _MMIO(0xe55c)
+
+#define GEN12_HDC_CHICKEN0 _MMIO(0xe5f0)
+#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
+#define ICL_HDC_MODE _MMIO(0xe5f4)
+
+#define EU_PERF_CNTL2 _MMIO(0xe658)
+#define EU_PERF_CNTL6 _MMIO(0xe65c)
+#define EU_PERF_CNTL3 _MMIO(0xe758)
+
#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
@@ -1475,80 +1087,436 @@ enum {
#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32)
#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32)
-#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
-#define DOP_CLOCK_GATING_DISABLE (1 << 0)
-#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
-#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define SARB_CHICKEN1 _MMIO(0xe90c)
+#define COMP_CKN_IN REG_GENMASK(30, 29)
-#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
-#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
-#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
-#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
-#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
-#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
-#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
-#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
-#define GEN8_ST_PO_DISABLE (1 << 13)
+#define __GEN11_VCS2_MOCS0 0x10000
+#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
+
+#define CRSTANDVID _MMIO(0x11100)
+#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE _MMIO(0x11110)
+#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 _MMIO(0x11114)
+#define VIDFREQ3 _MMIO(0x11118)
+#define VIDFREQ4 _MMIO(0x1111c)
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1 << 7)
+#define MEMCTL_TGT_VID_MASK 0x007f
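/*
 * Illustrative sketch (editor's addition, not part of this patch): per the
 * MEMCTL_CMD_STS comment above, software composes a command word, sets the
 * status bit to trigger it, and polls until the bit clears, roughly:
 *
 *	swctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
 *		(jitter << MEMCTL_FREQ_SHIFT) | MEMCTL_CMD_STS;
 *	write swctl to MEMSWCTL, then wait for MEMCTL_CMD_STS to read back 0
 *
 * 'jitter' is a hypothetical 0-15 frequency code, per the field comment.
 */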
+#define MEMIHYST _MMIO(0x1117c)
+#define MEMINTREN _MMIO(0x11180) /* 16 bits */
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
+#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS _MMIO(0x11184)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
+#define MEMMODECTL _MMIO(0x11190)
+#define MEMMODE_BOOST_EN (1 << 31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG _MMIO(0x1119c)
+#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG _MMIO(0x111a0)
+#define RCBMINAVG _MMIO(0x111a0)
+#define RCUPEI _MMIO(0x111b0)
+#define RCDNEI _MMIO(0x111b4)
+#define RSTDBYCTL _MMIO(0x111b8)
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
+#define VIDCTL _MMIO(0x111c0)
+#define VIDSTS _MMIO(0x111c8)
+#define VIDSTART _MMIO(0x111cc) /* 8 bits */
+#define MEMSTAT_ILK _MMIO(0x111f8)
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1 << 2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define PMMISC _MMIO(0x11214)
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
+#define SDEW _MMIO(0x1124c)
+#define CSIEW0 _MMIO(0x11250)
+#define CSIEW1 _MMIO(0x11254)
+#define CSIEW2 _MMIO(0x11258)
+#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
+#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
+#define MCHAFE _MMIO(0x112c0)
+#define CSIEC _MMIO(0x112e0)
+#define DMIEC _MMIO(0x112e4)
+#define DDREC _MMIO(0x112e8)
+#define PEG0EC _MMIO(0x112ec)
+#define PEG1EC _MMIO(0x112f0)
+#define GFXEC _MMIO(0x112f4)
+#define INTTOEXT_BASE_ILK _MMIO(0x11300)
+#define RPPREVBSYTUPAVG _MMIO(0x113b8)
+#define RCPREVBSYTUPAVG _MMIO(0x113b8)
+#define RCPREVBSYTDNAVG _MMIO(0x113bc)
+#define RPPREVBSYTDNAVG _MMIO(0x113bc)
+#define ECR _MMIO(0x11600)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 _MMIO(0x11608)
+#define OGW1 _MMIO(0x1160c)
+#define EG0 _MMIO(0x11610)
+#define EG1 _MMIO(0x11614)
+#define EG2 _MMIO(0x11618)
+#define EG3 _MMIO(0x1161c)
+#define EG4 _MMIO(0x11620)
+#define EG5 _MMIO(0x11624)
+#define EG6 _MMIO(0x11628)
+#define EG7 _MMIO(0x1162c)
+#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
+#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
+#define LCFUSE02 _MMIO(0x116c0)
+#define LCFUSE_HIV_MASK 0x000000ff
+
+#define GAC_ECO_BITS _MMIO(0x14090)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
+
+#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
+#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
+#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
+#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
+#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
+#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
+#define BCS_SWCTRL _MMIO(0x22200)
+#define BCS_SRC_Y REG_BIT(0)
+#define BCS_DST_Y REG_BIT(1)
+
+#define GAB_CTL _MMIO(0x24000)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
-#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
-#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
-#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
-#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
-#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
+#define GEN6_PMISR _MMIO(0x44020)
+#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
+#define GEN6_PMIIR _MMIO(0x44028)
+#define GEN6_PMIER _MMIO(0x4402c)
+#define GEN6_PM_MBOX_EVENT (1 << 25)
+#define GEN6_PM_THERMAL_EVENT (1 << 24)
+/*
+ * For Gen11 these are in the upper word of the GPM_WGBOXPERF
+ * registers. Shifting is handled on accessing the imr and ier.
+ */
+#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
+#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
+#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \
+ GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_EI_EXPIRED | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
+ GEN6_PM_RP_DOWN_TIMEOUT)
-#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
-#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
+#define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4)
+#define GEN7_GT_SCRATCH_REG_NUM 8
+
+#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
+#define GFX_FLSH_CNTL_EN (1 << 0)
+
+#define GTFIFODBG _MMIO(0x120000)
+#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
+#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
+
+#define GTFIFOCTL _MMIO(0x120008)
+#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
+#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
+
+#define FORCEWAKE_MT_ACK _MMIO(0x130040)
+#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
+#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044)
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
+#define FORCEWAKE_ACK _MMIO(0x130090)
+#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
+#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
+#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
+#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
+#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
+#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
+#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
+#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
+#define FORCEWAKE_VLV _MMIO(0x1300b0)
+#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
+#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
+#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
+
+#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+
+#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
+#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
+#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
+#define GEN8_LSLICESTAT_MASK 0x7
-/* MOCS (Memory Object Control State) registers */
-#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
-#define GEN9_LNCFCMOCS_REG_COUNT 32
-
-#define __GEN9_RCS0_MOCS0 0xc800
-#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
-#define __GEN9_VCS0_MOCS0 0xc900
-#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
-#define __GEN9_VCS1_MOCS0 0xca00
-#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
-#define __GEN9_VECS0_MOCS0 0xcb00
-#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
-#define __GEN9_BCS0_MOCS0 0xcc00
-#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
-#define __GEN11_VCS2_MOCS0 0x10000
-#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
-
-#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
-#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
-
-#define GEN9_SCRATCH1 _MMIO(0xb11c)
-#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
-
-#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
-#define PMFLUSHDONE_LNICRSDROP (1 << 20)
-#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
-#define PMFLUSHDONE_LNEBLK (1 << 22)
-
-#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
-#define XEHP_LNESPARE REG_BIT(19)
-
-#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
+#define VLV_COUNTER_CONTROL _MMIO(0x138104)
+#define VLV_COUNT_RANGE_HIGH (1 << 15)
+#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
+#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
+#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
+#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
+#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
+#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
+#define VLV_GT_MEDIA_RC6 _MMIO(0x13810c)
-/* gamt regs */
-#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
+#define GEN6_GT_GFX_RC6p _MMIO(0x13810c)
+#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
+#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
+#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c)
+
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
+#define GEN11_CSME (31)
+#define GEN11_GUNIT (28)
+#define GEN11_GUC (25)
+#define GEN11_WDPERF (20)
+#define GEN11_KCR (19)
+#define GEN11_GTPM (16)
+#define GEN11_BCS (15)
+#define GEN11_RCS0 (0)
+#define GEN11_VECS(x) (31 - (x))
+#define GEN11_VCS(x) (x)
+
+#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
+#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
+#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
+#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
+#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
+
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
+#define GEN11_INTR_DATA_VALID (1 << 31)
+#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
+#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
+#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
+/* irq instances for OTHER_CLASS */
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GTPM_INSTANCE 1
+#define OTHER_KCR_INSTANCE 4
+
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
+
+#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
+#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
+#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
+#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
+#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0)
+#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4)
+#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
+#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4)
+#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
+#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
+#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
+#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
+
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+
+enum {
+ INTEL_ADVANCED_CONTEXT = 0,
+ INTEL_LEGACY_32B_CONTEXT,
+ INTEL_ADVANCED_AD_CONTEXT,
+ INTEL_LEGACY_64B_CONTEXT
+};
-#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
-#define MMCD_PCLA (1 << 31)
-#define MMCD_HOTSPOT_EN (1 << 27)
+enum {
+ FAULT_AND_HANG = 0,
+ FAULT_AND_HALT, /* Debug only */
+ FAULT_AND_STREAM,
+ FAULT_AND_CONTINUE /* Unsupported */
+};
-#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C)
-#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
+#define GEN11_SW_CTX_ID_SHIFT 37
+#define GEN11_SW_CTX_ID_WIDTH 11
+#define GEN11_ENGINE_CLASS_SHIFT 61
+#define GEN11_ENGINE_CLASS_WIDTH 3
+#define GEN11_ENGINE_INSTANCE_SHIFT 48
+#define GEN11_ENGINE_INSTANCE_WIDTH 6
+#define XEHP_SW_CTX_ID_SHIFT 39
+#define XEHP_SW_CTX_ID_WIDTH 16
+#define XEHP_SW_COUNTER_SHIFT 58
+#define XEHP_SW_COUNTER_WIDTH 6
#endif /* __INTEL_GT_REGS__ */
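/*
 * Illustrative sketch, not part of the patch: decoding the identity
 * dword read back from GEN11_INTR_IDENTITY_REG() with the field
 * helpers defined above. Only macros from this hunk are used; the
 * function name is made up for illustration.
 */
static bool example_decode_gen11_identity(u32 ident, u8 *engine_class,
					  u8 *engine_instance, u16 *intr)
{
	if (!(ident & GEN11_INTR_DATA_VALID))
		return false;	/* no valid event latched by hardware */
	*engine_class = GEN11_INTR_ENGINE_CLASS(ident);		/* bits 18:16 */
	*engine_instance = GEN11_INTR_ENGINE_INSTANCE(ident);	/* bits 25:20 */
	*intr = GEN11_INTR_ENGINE_INTR(ident);			/* bits 15:0 */
	return true;
}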
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b5d3cde08aac..8506db9983da 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -10,6 +10,7 @@
#include <drm/drm_cache.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 335c65758d6f..40e2e28ee6c7 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_llc.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
struct ia_constants {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e46bf1429f2c..91c87a3a8e0b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -8,6 +8,8 @@
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
+#include "i915_reg.h"
+#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
@@ -1688,6 +1690,17 @@ static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
#endif
}
+static u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
void lrc_update_runtime(struct intel_context *ce)
{
u32 old;
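/*
 * Hedged sketch of how the CTX_TIMESTAMP value returned by
 * lrc_get_runtime() is typically consumed inside intel_lrc.c: treat it
 * as a free-running 32-bit counter and accumulate deltas, letting
 * unsigned arithmetic absorb a single wrap. Names are illustrative,
 * not the exact body of lrc_update_runtime().
 */
static void example_accumulate_runtime(const struct intel_context *ce,
				       u64 *total, u32 *last)
{
	u32 now = lrc_get_runtime(ce);

	*total += now - *last;	/* u32 subtraction handles wraparound */
	*last = now;
}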
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 7f697845c4cf..0b76f096b559 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-#include "intel_context.h"
-#include "intel_lrc_reg.h"
-
struct drm_i915_gem_object;
+struct i915_gem_ww_ctx;
+struct intel_context;
struct intel_engine_cs;
struct intel_ring;
+struct kref;
/* At the start of the context image is its per-process HWS page */
#define LRC_PPHWSP_PN (0)
@@ -68,15 +68,5 @@ void lrc_check_regs(const struct intel_context *ce,
const char *when);
void lrc_update_runtime(struct intel_context *ce);
-static inline u32 lrc_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
#endif /* __INTEL_LRC_H__ */
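/*
 * The hunk above swaps full includes for forward declarations; a header
 * can do that whenever it only deals in pointers to the types. Minimal
 * illustration (function and usage are placeholders):
 */
struct intel_context;				/* opaque at this point */
void example_consumer(struct intel_context *ce);	/* pointer-only use */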
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index b575cd6e0b7a..5121e6dc2fa5 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
+
#include "i915_drv.h"
#include "intel_renderstate.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 59beb69ff6f2..ae7542f70afb 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -14,6 +14,7 @@
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
@@ -22,6 +23,7 @@
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
+#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"
@@ -347,25 +349,25 @@ static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
MISSING_CASE(engine->class);
fallthrough;
case VIDEO_DECODE_CLASS:
- sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
- sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
- sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
break;
case VIDEO_ENHANCEMENT_CLASS:
- sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
- sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine);
+ sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
- sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine);
+ sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
@@ -412,7 +414,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
* forced lock on the VE engine that shares the same SFC.
*/
if (!(intel_uncore_read_fw(uncore,
- GEN12_HCP_SFC_LOCK_STATUS(engine)) &
+ GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
GEN12_HCP_SFC_USAGE_BIT))
return 0;
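/*
 * Illustrative only: after this change the GEN11 SFC register macros
 * take the engine's mmio base rather than the engine pointer, so a
 * caller reads them roughly like this (the helper name is made up):
 */
static u32 example_read_sfc_usage(struct intel_uncore *uncore,
				  const struct intel_engine_cs *engine)
{
	return intel_uncore_read_fw(uncore,
				    GEN11_VECS_SFC_USAGE(engine->mmio_base));
}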
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 723055340c9b..40ffcb94e379 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 8362eb09092e..6d7ec3bf1f32 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -3,6 +3,10 @@
* Copyright © 2008-2021 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
+
#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0cb299cdfc8f..fd95449ed46d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -13,6 +13,7 @@
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_regs.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_sideband.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 438bbc7b8147..b9640212d659 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -3,9 +3,12 @@
* Copyright © 2016-2018 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
#include "i915_active.h"
+#include "i915_drv.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 59ad23118875..eeda1692d845 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -237,7 +237,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
- wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+ wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
wa_masked_en(wal, GEN8_ROW_CHICKEN,
@@ -2463,7 +2463,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
wa_masked_en(wal,
- MI_MODE,
+ RING_MI_MODE(RENDER_RING_BASE),
ASYNC_FLIP_PERF_DISABLE);
if (GRAPHICS_VER(i915) == 6) {
@@ -2522,7 +2522,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- wa_add(wal, MI_MODE,
+ wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
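/*
 * Sketch of the pattern introduced above: RING_MI_MODE() is a
 * per-engine register macro, so the workaround helpers pass the mmio
 * base of the engine they target (RENDER_RING_BASE in these hunks).
 * The wrapper name is illustrative.
 */
static void example_disable_async_flip_perf(struct i915_wa_list *wal)
{
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE),
		     ASYNC_FLIP_PERF_DISABLE);
}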
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index e10da897e07a..72d5faab8f9a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -5,6 +5,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 4a20ba63446c..9e9ccb139ba7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -6,6 +6,7 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "i915_gem_evict.h"
#include "intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 618c905daa19..21c29d315cc0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -5,6 +5,8 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
+
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index fa4293d2944f..c9c4f391c5cc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -5,6 +5,8 @@
#include <linux/sort.h>
+#include "gem/i915_gem_internal.h"
+
#include "selftests/i915_random.h"
static const unsigned int sizes[] = {
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index e1e5dd5f7638..6a69ac0184ad 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -6,6 +6,8 @@
#include <linux/pm_qos.h>
#include <linux/sort.h>
+#include "gem/i915_gem_internal.h"
+
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 0287c2573c51..67a9aab801dd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ddbea939b1dc..b3d28b003b73 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -3,9 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_cache.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
+#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index dd588ecf048a..549d5919dc70 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -5,6 +5,8 @@
#include <linux/bitfield.h>
#include <linux/firmware.h>
+
+#include <drm/drm_cache.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index d02a48a5335a..12b7c36d8e8f 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -31,6 +31,11 @@
#include <linux/dma-buf.h>
#include <linux/vfio.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+
+#include "gem/i915_gem_dmabuf.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index c8dcda6d4f0d..66d354c4195b 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -163,7 +163,7 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
&write_pointer, 4);
}
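/*
 * Assumed shape of the INTEL_HWS_CSB_WRITE_INDEX() macro used above
 * (its real definition is outside this diff); it mirrors the static
 * inline removed from i915_drv.h later in this patch:
 */
#define EXAMPLE_HWS_CSB_WRITE_INDEX(i915) \
	(GRAPHICS_VER(i915) >= 11 ? ICL_HWS_CSB_WRITE_INDEX : \
				    I915_HWS_CSB_WRITE_INDEX)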
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index c2ae79092b14..5e3ae5970c6b 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,6 +40,7 @@
#include "i915_reg.h"
#include "gvt.h"
#include "i915_pvinfo.h"
+#include "intel_mchbar_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 20b82fb036f8..e8d6c76e9234 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -46,6 +46,8 @@
#include <linux/nospec.h>
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "gvt.h"
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index aea4c30645ff..5f6e41636655 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_cache.h>
+
#include "gt/intel_engine.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4fe1ce1433fe..54c66d4b6887 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -49,6 +49,7 @@
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
+#include "intel_mchbar_regs.h"
#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -137,6 +138,17 @@ static const char *stringify_vma_type(const struct i915_vma *vma)
return "ppgtt";
}
+static const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
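/*
 * Usage sketch for the helper added above: how the object dump might
 * print the cache level. Assumes the obj->cache_level field and is not
 * a literal excerpt of i915_debugfs_describe_obj().
 */
static void example_print_cache_level(struct seq_file *m,
				      struct drm_i915_private *i915,
				      const struct drm_i915_gem_object *obj)
{
	seq_printf(m, " [cache:%s]",
		   i915_cache_level_str(i915, obj->cache_level));
}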
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 4b56d4d83e75..a892a1b546a8 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -62,6 +62,8 @@
#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_create.h"
+#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
@@ -71,6 +73,7 @@
#include "pxp/intel_pxp_pm.h"
+#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -1821,6 +1824,21 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
+/*
+ * Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 0
+
static const struct drm_driver i915_drm_driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 9ef8db4aa0a6..9d11de65daaf 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -12,6 +12,11 @@ struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
+#define DRIVER_NAME "i915"
+#define DRIVER_DESC "Intel Graphics"
+#define DRIVER_DATE "20201103"
+#define DRIVER_TIMESTAMP 1604406085
+
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f7bdb973c880..ffde71b6b3f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,38 +31,17 @@
#define _I915_DRV_H_
#include <uapi/drm/i915_drm.h>
-#include <uapi/drm/drm_fourcc.h>
#include <asm/hypervisor.h>
-#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <linux/backlight.h>
-#include <linux/hash.h>
#include <linux/intel-iommu.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/dma-resv.h>
-#include <linux/shmem_fs.h>
-#include <linux/stackdepot.h>
-#include <linux/xarray.h>
-
-#include <drm/drm_gem.h>
-#include <drm/drm_auth.h>
-#include <drm/drm_cache.h>
-#include <drm/drm_util.h>
-#include <drm/drm_dsc.h>
-#include <drm/drm_atomic.h>
+
#include <drm/drm_connector.h>
-#include <drm/i915_mei_hdcp_interface.h>
#include <drm/ttm/ttm_device.h>
-#include "i915_params.h"
-#include "i915_utils.h"
-
#include "display/intel_bios.h"
#include "display/intel_cdclk.h"
#include "display/intel_display.h"
@@ -77,9 +56,9 @@
#include "display/intel_opregion.h"
#include "gem/i915_gem_context_types.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
-#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -87,6 +66,12 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
+#include "i915_gem.h"
+#include "i915_gpu_error.h"
+#include "i915_params.h"
+#include "i915_perf_types.h"
+#include "i915_scheduler.h"
+#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
@@ -94,28 +79,32 @@
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
-#include "intel_wakeref.h"
#include "intel_wopcm.h"
-#include "i915_gem.h"
-#include "i915_gem_gtt.h"
-#include "i915_gpu_error.h"
-#include "i915_perf_types.h"
-#include "i915_request.h"
-#include "i915_scheduler.h"
-#include "gt/intel_timeline.h"
-#include "i915_vma.h"
-
-
-/* General customization:
- */
-
-#define DRIVER_NAME "i915"
-#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20201103"
-#define DRIVER_TIMESTAMP 1604406085
-
+struct dpll;
+struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_audio_funcs;
+struct intel_cdclk_config;
+struct intel_cdclk_funcs;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
+struct intel_color_funcs;
+struct intel_connector;
+struct intel_crtc;
+struct intel_dp;
+struct intel_dpll_funcs;
+struct intel_encoder;
+struct intel_fbdev;
+struct intel_fdi_funcs;
+struct intel_hotplug_funcs;
+struct intel_initial_plane_config;
+struct intel_limit;
+struct intel_overlay;
+struct intel_overlay_error_state;
+struct vlv_s0ix_state;
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
@@ -166,117 +155,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-struct drm_i915_private;
-
-struct drm_i915_file_private {
- struct drm_i915_private *dev_priv;
-
- union {
- struct drm_file *file;
- struct rcu_head rcu;
- };
-
- /** @proto_context_lock: Guards all struct i915_gem_proto_context
- * operations
- *
- * This not only guards @proto_context_xa, but is always held
- * whenever we manipulate any struct i915_gem_proto_context,
- * including finalizing it on first actual use of the GEM context.
- *
- * See i915_gem_proto_context.
- */
- struct mutex proto_context_lock;
-
- /** @proto_context_xa: xarray of struct i915_gem_proto_context
- *
- * Historically, the context uAPI allowed for two methods of
- * setting context parameters: SET_CONTEXT_PARAM and
- * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called
- * at any time while the later happens as part of
- * GEM_CONTEXT_CREATE. Everything settable via one was settable
- * via the other. While some params are fairly simple and setting
- * them on a live context is harmless such as the context priority,
- * others are far trickier such as the VM or the set of engines.
- * In order to swap out the VM, for instance, we have to delay
- * until all current in-flight work is complete, swap in the new
- * VM, and then continue. This leads to a plethora of potential
- * race conditions we'd really rather avoid.
- *
- * We have since disallowed setting these more complex parameters
- * on active contexts. This works by delaying the creation of the
- * actual context until after the client is done configuring it
- * with SET_CONTEXT_PARAM. From the perspective of the client, it
- * has the same u32 context ID the whole time. From the
- * perspective of i915, however, it's a struct i915_gem_proto_context
- * right up until the point where we attempt to do something which
- * the proto-context can't handle. Then the struct i915_gem_context
- * gets created.
- *
- * This is accomplished via a little xarray dance. When
- * GEM_CONTEXT_CREATE is called, we create a struct
- * i915_gem_proto_context, reserve a slot in @context_xa but leave
- * it NULL, and place the proto-context in the corresponding slot
- * in @proto_context_xa. Then, in i915_gem_context_lookup(), we
- * first check @context_xa. If it's there, we return the struct
- * i915_gem_context and we're done. If it's not, we look in
- * @proto_context_xa and, if we find it there, we create the actual
- * context and kill the proto-context.
- *
- * In order for this dance to work properly, everything which ever
- * touches a struct i915_gem_proto_context is guarded by
- * @proto_context_lock, including context creation. Yes, this
- * means context creation now takes a giant global lock but it
- * can't really be helped and that should never be on any driver's
- * fast-path anyway.
- */
- struct xarray proto_context_xa;
-
- /** @context_xa: xarray of fully created i915_gem_context
- *
- * Write access to this xarray is guarded by @proto_context_lock.
- * Otherwise, writers may race with finalize_create_context_locked().
- *
- * See @proto_context_xa.
- */
- struct xarray context_xa;
- struct xarray vm_xa;
-
- unsigned int bsd_engine;
-
-/*
- * Every context ban increments per client ban score. Also
- * hangs in short succession increments ban score. If ban threshold
- * is reached, client is considered banned and submitting more work
- * will fail. This is a stop gap measure to limit the badly behaving
- * clients access to gpu. Note that unbannable contexts never increment
- * the client ban score.
- */
-#define I915_CLIENT_SCORE_HANG_FAST 1
-#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
-#define I915_CLIENT_SCORE_CONTEXT_BAN 3
-#define I915_CLIENT_SCORE_BANNED 9
- /** ban_score: Accumulated score of all ctx bans and fast hangs. */
- atomic_t ban_score;
- unsigned long hang_timestamp;
-};
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: Add Power Management
- * 1.3: Add vblank support
- * 1.4: Fix cmdbuffer path, add heap destroy
- * 1.5: Add vblank pipe configuration
- * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
- * - Support vertical blank on secondary display pipe
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 6
-#define DRIVER_PATCHLEVEL 0
-
-struct intel_overlay;
-struct intel_overlay_error_state;
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -286,23 +164,6 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
-struct intel_connector;
-struct intel_encoder;
-struct intel_atomic_state;
-struct intel_cdclk_config;
-struct intel_cdclk_funcs;
-struct intel_cdclk_state;
-struct intel_cdclk_vals;
-struct intel_initial_plane_config;
-struct intel_crtc;
-struct intel_limit;
-struct dpll;
-
-/* functions used internal in intel_pm.c */
-struct drm_i915_clock_gating_funcs {
- void (*init_clock_gating)(struct drm_i915_private *dev_priv);
-};
-
/* functions used for watermark calcs for display. */
struct drm_i915_wm_disp_funcs {
/* update_wm is for legacy wm management */
@@ -320,38 +181,6 @@ struct drm_i915_wm_disp_funcs {
int (*compute_global_watermarks)(struct intel_atomic_state *state);
};
-struct intel_color_funcs {
- int (*color_check)(struct intel_crtc_state *crtc_state);
- /*
- * Program double buffered color management registers during
- * vblank evasion. The registers should then latch during the
- * next vblank start, alongside any other double buffered registers
- * involved with the same commit.
- */
- void (*color_commit)(const struct intel_crtc_state *crtc_state);
- /*
- * Load LUTs (and other single buffered color management
- * registers). Will (hopefully) be called during the vblank
- * following the latching of any double buffered registers
- * involved with the same commit.
- */
- void (*load_luts)(const struct intel_crtc_state *crtc_state);
- void (*read_luts)(struct intel_crtc_state *crtc_state);
-};
-
-struct intel_hotplug_funcs {
- void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
-};
-
-struct intel_fdi_funcs {
- void (*fdi_link_train)(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-};
-
-struct intel_dpll_funcs {
- int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
-};
-
struct drm_i915_display_funcs {
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -385,7 +214,6 @@ enum drrs_support_type {
SEAMLESS_DRRS_SUPPORT = 2
};
-struct intel_dp;
struct i915_drrs {
struct mutex mutex;
struct delayed_work work;
@@ -403,8 +231,6 @@ struct i915_drrs {
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
-struct intel_fbdev;
-
struct intel_gmbus {
struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
@@ -423,8 +249,6 @@ struct i915_suspend_saved_registers {
u16 saveGCDGMBUS;
};
-struct vlv_s0ix_state;
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -533,6 +357,9 @@ struct intel_vbt_data {
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drm_panel_orientation orientation;
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
enum drrs_support_type drrs_type;
struct {
@@ -613,7 +440,6 @@ struct i915_selftest_stash {
};
/* intel_audio.c private */
-struct intel_audio_funcs;
struct intel_audio_private {
/* Display internal audio functions */
const struct intel_audio_funcs *funcs;
@@ -1657,65 +1483,16 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-int i915_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-static inline u32 i915_reset_count(struct i915_gpu_error *error)
-{
- return atomic_read(&error->reset_count);
-}
-
-static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
- const struct intel_engine_cs *engine)
-{
- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
-}
-
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-void i915_gem_suspend(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
-void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-
-struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-
-struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
-
-static inline struct i915_address_space *
-i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
-{
- struct i915_address_space *vm;
-
- xa_lock(&file_priv->vm_xa);
- vm = xa_load(&file_priv->vm_xa, id);
- if (vm)
- kref_get(&vm->ref);
- xa_unlock(&file_priv->vm_xa);
-
- return vm;
-}
-
-/* i915_gem_internal.c */
-struct drm_i915_gem_object *
-i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
- phys_addr_t size);
-struct drm_i915_gem_object *
-__i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
- const struct drm_i915_gem_object_ops *ops,
- phys_addr_t size);
-
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
@@ -1725,8 +1502,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
i915_gem_object_is_tiled(obj);
}
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
@@ -1734,14 +1509,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
-{
- if (GRAPHICS_VER(i915) >= 11)
- return ICL_HWS_CSB_WRITE_INDEX;
- else
- return I915_HWS_CSB_WRITE_INDEX;
-}
-
static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj, bool always_coherent)
diff --git a/drivers/gpu/drm/i915/i915_file_private.h b/drivers/gpu/drm/i915/i915_file_private.h
new file mode 100644
index 000000000000..fb16cc431b2a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_file_private.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_FILE_PRIVATE_H__
+#define __I915_FILE_PRIVATE_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+struct drm_i915_private;
+struct drm_file;
+
+struct drm_i915_file_private {
+ struct drm_i915_private *dev_priv;
+
+ union {
+ struct drm_file *file;
+ struct rcu_head rcu;
+ };
+
+ /** @proto_context_lock: Guards all struct i915_gem_proto_context
+ * operations
+ *
+ * This not only guards @proto_context_xa, but is always held
+ * whenever we manipulate any struct i915_gem_proto_context,
+ * including finalizing it on first actual use of the GEM context.
+ *
+ * See i915_gem_proto_context.
+ */
+ struct mutex proto_context_lock;
+
+ /** @proto_context_xa: xarray of struct i915_gem_proto_context
+ *
+ * Historically, the context uAPI allowed for two methods of
+ * setting context parameters: SET_CONTEXT_PARAM and
+ * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called
+ * at any time while the latter happens as part of
+ * GEM_CONTEXT_CREATE. Everything settable via one was settable
+ * via the other. While some params are fairly simple and setting
+ * them on a live context is harmless such as the context priority,
+ * others are far trickier such as the VM or the set of engines.
+ * In order to swap out the VM, for instance, we have to delay
+ * until all current in-flight work is complete, swap in the new
+ * VM, and then continue. This leads to a plethora of potential
+ * race conditions we'd really rather avoid.
+ *
+ * We have since disallowed setting these more complex parameters
+ * on active contexts. This works by delaying the creation of the
+ * actual context until after the client is done configuring it
+ * with SET_CONTEXT_PARAM. From the perspective of the client, it
+ * has the same u32 context ID the whole time. From the
+ * perspective of i915, however, it's a struct i915_gem_proto_context
+ * right up until the point where we attempt to do something which
+ * the proto-context can't handle. Then the struct i915_gem_context
+ * gets created.
+ *
+ * This is accomplished via a little xarray dance. When
+ * GEM_CONTEXT_CREATE is called, we create a struct
+ * i915_gem_proto_context, reserve a slot in @context_xa but leave
+ * it NULL, and place the proto-context in the corresponding slot
+ * in @proto_context_xa. Then, in i915_gem_context_lookup(), we
+ * first check @context_xa. If it's there, we return the struct
+ * i915_gem_context and we're done. If it's not, we look in
+ * @proto_context_xa and, if we find it there, we create the actual
+ * context and kill the proto-context.
+ *
+ * In order for this dance to work properly, everything which ever
+ * touches a struct i915_gem_proto_context is guarded by
+ * @proto_context_lock, including context creation. Yes, this
+ * means context creation now takes a giant global lock but it
+ * can't really be helped and that should never be on any driver's
+ * fast-path anyway.
+ */
+ struct xarray proto_context_xa;
+
+ /** @context_xa: xarray of fully created i915_gem_context
+ *
+ * Write access to this xarray is guarded by @proto_context_lock.
+ * Otherwise, writers may race with finalize_create_context_locked().
+ *
+ * See @proto_context_xa.
+ */
+ struct xarray context_xa;
+ struct xarray vm_xa;
+
+ unsigned int bsd_engine;
+
+/*
+ * Every context ban increments the per-client ban score. Hangs in
+ * short succession also increment the ban score. If the ban threshold
+ * is reached, the client is considered banned and submitting more work
+ * will fail. This is a stopgap measure to limit a badly behaving
+ * client's access to the GPU. Note that unbannable contexts never
+ * increment the client ban score.
+ */
+#define I915_CLIENT_SCORE_HANG_FAST 1
+#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN 3
+#define I915_CLIENT_SCORE_BANNED 9
+ /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+ atomic_t ban_score;
+ unsigned long hang_timestamp;
+};
+
+#endif /* __I915_FILE_PRIVATE_H__ */
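/*
 * Minimal sketch of the lookup order the proto-context comment above
 * describes. The real code is i915_gem_context_lookup(); the helper
 * name and the locking shown here are simplified assumptions.
 */
struct i915_gem_context *
example_context_lookup(struct drm_i915_file_private *fpriv, u32 id)
{
	struct i915_gem_context *ctx;

	ctx = xa_load(&fpriv->context_xa, id);	/* already finalized? */
	if (ctx)
		return ctx;

	mutex_lock(&fpriv->proto_context_lock);
	ctx = example_finalize_proto_context(fpriv, id);	/* assumed helper */
	mutex_unlock(&fpriv->proto_context_lock);
	return ctx;
}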
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5ef959a9f594..db897fb11f9b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,7 +25,6 @@
*
*/
-#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
@@ -37,6 +36,9 @@
#include <linux/dma-buf.h>
#include <linux/mman.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_vma_manager.h>
+
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
@@ -44,6 +46,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
@@ -52,9 +55,9 @@
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-
#include "intel_pm.h"
static int
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 102c7bb747f8..79ef0e21d71a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -34,6 +34,7 @@
#include <linux/utsname.h>
#include <linux/zlib.h>
+#include <drm/drm_cache.h>
#include <drm/drm_print.h>
#include "display/intel_dmc.h"
@@ -46,6 +47,7 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5aedf5129814..903d838e2e63 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -210,6 +210,17 @@ struct drm_i915_error_state_buf {
int err;
};
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_count);
+}
+
+static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+{
+ return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
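/*
 * Illustrative use of the relocated helpers: callers (selftests, error
 * capture) sample the counter before submitting work and compare
 * afterwards to detect that a per-engine reset occurred in between.
 */
static bool example_engine_was_reset(struct i915_gpu_error *error,
				     const struct intel_engine_cs *engine,
				     u32 count_before)
{
	return i915_reset_engine_count(error, engine) != count_before;
}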
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05eb09d8a66..fdd568ba4a16 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -49,6 +49,7 @@
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"
@@ -4347,6 +4348,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
return ret;
}
+struct intel_hotplug_funcs {
+ void (*hpd_irq_setup)(struct drm_i915_private *i915);
+};
+
#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
.hpd_irq_setup = platform##_hpd_irq_setup, \
@@ -4361,6 +4366,12 @@ HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS
+void intel_hpd_irq_setup(struct drm_i915_private *i915)
+{
+ if (i915->display_irqs_enabled && i915->hotplug_funcs)
+ i915->hotplug_funcs->hpd_irq_setup(i915);
+}
+
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
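/*
 * Sketch of the call-site change implied above: code outside
 * i915_irq.c now uses the wrapper instead of dereferencing the
 * hotplug_funcs table directly (the old form is shown as an assumption).
 */
static void example_rearm_hotplug(struct drm_i915_private *i915)
{
	/* previously (assumed): i915->hotplug_funcs->hpd_irq_setup(i915); */
	intel_hpd_irq_setup(i915);
}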
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 0eb90d271fa7..82639d9d7e82 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -37,6 +37,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void intel_hpd_irq_setup(struct drm_i915_private *i915);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
u32 mask,
u32 bits);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
index 84f12598d145..def7302ef7fe 100644
--- a/drivers/gpu/drm/i915/i915_mitigations.c
+++ b/drivers/gpu/drm/i915/i915_mitigations.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 8451822637f0..f276005b8070 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_active.h"
+#include "i915_driver.h"
#include "i915_params.h"
#include "i915_pci.h"
#include "i915_perf.h"
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 189d3bb8955a..76e590fcb903 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -22,6 +22,7 @@
*
*/
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 590efdb9de8d..3ecb89e96fb6 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,6 +196,7 @@
#include <linux/uuid.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
@@ -205,9 +206,11 @@
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
+#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 87c92314ee26..de403ee18bc7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1830,117 +1830,8 @@
_PALETTE_B, _CHV_PALETTE_C) + \
(i) * 4)
-/* MCH MMIO space */
-
-/*
- * MCHBAR mirror.
- *
- * This mirrors the MCHBAR MMIO space whose location is determined by
- * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
- * every way. It is not accessible from the CP register read instructions.
- *
- * Starting from Haswell, you can't write registers using the MCHBAR mirror,
- * just read.
- */
-#define MCHBAR_MIRROR_BASE 0x10000
-
-#define MCHBAR_MIRROR_BASE_SNB 0x140000
-
-#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
-#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
-#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
-#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
-#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
-
-/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
-#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
-
-/* 915-945 and GM965 MCH register controlling DRAM channel access */
-#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
-#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
-#define DCC_ADDRESSING_MODE_MASK (3 << 0)
-#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
-#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
-#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
-#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
-
-/* Pineview MCH register contains DDR3 setting */
-#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
-#define CSHRDDR3CTL_DDR3 (1 << 2)
-
-/* 965 MCH register controlling DRAM channel configuration */
-#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206)
-#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606)
-
-/* snb MCH registers for reading the DRAM channel configuration */
-#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
-#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
-#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
-#define MAD_DIMM_ECC_MASK (0x3 << 24)
-#define MAD_DIMM_ECC_OFF (0x0 << 24)
-#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
-#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
-#define MAD_DIMM_ECC_ON (0x3 << 24)
-#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
-#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
-#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
-#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
-#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
-#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
-#define MAD_DIMM_A_SELECT (0x1 << 16)
-/* DIMM sizes are in multiples of 256mb. */
-#define MAD_DIMM_B_SIZE_SHIFT 8
-#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
-#define MAD_DIMM_A_SIZE_SHIFT 0
-#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
-
-/* snb MCH registers for priority tuning */
-#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
-#define MCH_SSKPD_WM0_MASK 0x3f
-#define MCH_SSKPD_WM0_VAL 0xc
-
-/* Clocking configuration register */
-#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
-#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
-#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */
-#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
-#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
-#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
-#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
-#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
-#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
-#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
-#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */
-#define CLKCFG_FSB_MASK (7 << 0)
-#define CLKCFG_MEM_533 (1 << 4)
-#define CLKCFG_MEM_667 (2 << 4)
-#define CLKCFG_MEM_800 (3 << 4)
-#define CLKCFG_MEM_MASK (7 << 4)
-
-#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
-#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
-
-#define TSC1 _MMIO(0x11001)
-#define TSE (1 << 0)
-#define TR1 _MMIO(0x11006)
-#define TSFS _MMIO(0x11020)
-#define TSFS_SLOPE_MASK 0x0000ff00
-#define TSFS_SLOPE_SHIFT 8
-#define TSFS_INTR_MASK 0x000000ff
-
-#define CSIPLL0 _MMIO(0x12c10)
-#define DDRMPLL1 _MMIO(0X12c20)
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
-#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
-#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
-#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
-#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
-#define RP0_CAP_MASK REG_GENMASK(7, 0)
-#define RP1_CAP_MASK REG_GENMASK(15, 8)
-#define RPN_CAP_MASK REG_GENMASK(23, 16)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
@@ -4291,49 +4182,32 @@
#define _WM0_PIPEC_IVB 0x45200
#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
_WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
-#define WM0_PIPE_PLANE_MASK (0xffff << 16)
-#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0xff << 8)
-#define WM0_PIPE_SPRITE_SHIFT 8
-#define WM0_PIPE_CURSOR_MASK (0xff)
+#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
+#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
+#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
+#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
+#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
#define WM1_LP_ILK _MMIO(0x45108)
-#define WM1_LP_SR_EN (1 << 31)
-#define WM1_LP_LATENCY_SHIFT 24
-#define WM1_LP_LATENCY_MASK (0x7f << 24)
-#define WM1_LP_FBC_MASK (0xf << 20)
-#define WM1_LP_FBC_SHIFT 20
-#define WM1_LP_FBC_SHIFT_BDW 19
-#define WM1_LP_SR_MASK (0x7ff << 8)
-#define WM1_LP_SR_SHIFT 8
-#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM2_LP_EN (1 << 31)
#define WM3_LP_ILK _MMIO(0x45110)
-#define WM3_LP_EN (1 << 31)
+#define WM_LP_ENABLE REG_BIT(31)
+#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
+#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
+#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
+#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
+#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
+#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
+#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
+#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
+#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM1S_LP_EN (1 << 31)
-
-#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
- (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
- ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
-
-/* Memory latency timer register */
-#define MLTR_ILK _MMIO(0x11222)
-#define MLTR_WM1_SHIFT 0
-#define MLTR_WM2_SHIFT 8
-/* the unit of memory self-refresh latency time is 0.5us */
-#define ILK_SRLT_MASK 0x3f
-
-
-/* the address where we get all kinds of latency value */
-#define SSKPD _MMIO(0x5d10)
-#define SSKPD_WM_MASK 0x3f
-#define SSKPD_WM0_SHIFT 0
-#define SSKPD_WM1_SHIFT 8
-#define SSKPD_WM2_SHIFT 16
-#define SSKPD_WM3_SHIFT 24
+#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
+#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
+#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
/*
* The two pipe frame counter registers are not synchronized, so
@@ -5632,7 +5506,8 @@
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
-#define DMC_DEBUG3 _MMIO(0x101090)
+#define TGL_DMC_DEBUG3 _MMIO(0x101090)
+#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
@@ -6040,11 +5915,14 @@
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
-#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define ICL_DELAY_PMRSP REG_BIT(22)
-#define DISABLE_FLR_SRC REG_BIT(15)
-#define MASK_WAKEMEM REG_BIT(13)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
+#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define ICL_DELAY_PMRSP REG_BIT(22)
+#define DISABLE_FLR_SRC REG_BIT(15)
+#define MASK_WAKEMEM REG_BIT(13)
#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
@@ -7986,6 +7864,12 @@ enum skl_power_gate {
#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
_TGL_DPLL1_CFGCR0)
+#define _TGL_DPLL0_DIV0 0x164B00
+#define _TGL_DPLL1_DIV0 0x164C00
+#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0)
+#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val))
+
#define _TGL_DPLL0_CFGCR1 0x164288
#define _TGL_DPLL1_CFGCR1 0x164290
#define _TGL_TBTPLL_CFGCR1 0x1642A0
@@ -8033,6 +7917,8 @@ enum skl_power_gate {
/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
#define _DKL_PLL_DIV0 0x200
+#define DKL_PLL_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define DKL_PLL_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(DKL_PLL_DIV0_AFC_STARTUP_MASK, (val))
#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
@@ -8042,6 +7928,10 @@ enum skl_power_gate {
#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0_MASK (DKL_PLL_DIV0_INTEG_COEFF_MASK | \
+ DKL_PLL_DIV0_PROP_COEFF_MASK | \
+ DKL_PLL_DIV0_FBPREDIV_MASK | \
+ DKL_PLL_DIV0_FBDIV_INT_MASK)
#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
_DKL_PHY2_BASE) + \
_DKL_PLL_DIV0)
@@ -8215,93 +8105,7 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-#define BXT_D_CR_DRP0_DUNIT8 0x1000
-#define BXT_D_CR_DRP0_DUNIT9 0x1200
-#define BXT_D_CR_DRP0_DUNIT_START 8
-#define BXT_D_CR_DRP0_DUNIT_END 11
-#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \
- _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
- BXT_D_CR_DRP0_DUNIT9))
-#define BXT_DRAM_RANK_MASK 0x3
-#define BXT_DRAM_RANK_SINGLE 0x1
-#define BXT_DRAM_RANK_DUAL 0x3
-#define BXT_DRAM_WIDTH_MASK (0x3 << 4)
-#define BXT_DRAM_WIDTH_SHIFT 4
-#define BXT_DRAM_WIDTH_X8 (0x0 << 4)
-#define BXT_DRAM_WIDTH_X16 (0x1 << 4)
-#define BXT_DRAM_WIDTH_X32 (0x2 << 4)
-#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
-#define BXT_DRAM_SIZE_MASK (0x7 << 6)
-#define BXT_DRAM_SIZE_SHIFT 6
-#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
-#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
-#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
-#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
-#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
-#define BXT_DRAM_TYPE_MASK (0x7 << 22)
-#define BXT_DRAM_TYPE_SHIFT 22
-#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
-#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
-#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
-#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
-
-#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
-#define DG1_GEAR_TYPE REG_BIT(16)
-
-#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
-#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
-#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
-#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
-#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
-#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
-
-#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
-#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
-#define SKL_DRAM_S_SHIFT 16
-#define SKL_DRAM_SIZE_MASK 0x3F
-#define SKL_DRAM_WIDTH_MASK (0x3 << 8)
-#define SKL_DRAM_WIDTH_SHIFT 8
-#define SKL_DRAM_WIDTH_X8 (0x0 << 8)
-#define SKL_DRAM_WIDTH_X16 (0x1 << 8)
-#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
-#define SKL_DRAM_RANK_MASK (0x1 << 10)
-#define SKL_DRAM_RANK_SHIFT 10
-#define SKL_DRAM_RANK_1 (0x0 << 10)
-#define SKL_DRAM_RANK_2 (0x1 << 10)
-#define SKL_DRAM_RANK_MASK (0x1 << 10)
-#define ICL_DRAM_SIZE_MASK 0x7F
-#define ICL_DRAM_WIDTH_MASK (0x3 << 7)
-#define ICL_DRAM_WIDTH_SHIFT 7
-#define ICL_DRAM_WIDTH_X8 (0x0 << 7)
-#define ICL_DRAM_WIDTH_X16 (0x1 << 7)
-#define ICL_DRAM_WIDTH_X32 (0x2 << 7)
-#define ICL_DRAM_RANK_MASK (0x3 << 9)
-#define ICL_DRAM_RANK_SHIFT 9
-#define ICL_DRAM_RANK_1 (0x0 << 9)
-#define ICL_DRAM_RANK_2 (0x1 << 9)
-#define ICL_DRAM_RANK_3 (0x2 << 9)
-#define ICL_DRAM_RANK_4 (0x3 << 9)
-
-#define SA_PERF_STATUS_0_0_0_MCHBAR_PC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918)
-#define DG1_QCLK_RATIO_MASK REG_GENMASK(9, 2)
-#define DG1_QCLK_REFERENCE REG_BIT(10)
-
-#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
-#define DG1_DRAM_T_RDPRE_MASK REG_GENMASK(16, 11)
-#define DG1_DRAM_T_RP_MASK REG_GENMASK(6, 0)
-#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004)
-#define DG1_DRAM_T_RCD_MASK REG_GENMASK(15, 9)
-#define DG1_DRAM_T_RAS_MASK REG_GENMASK(8, 1)
-
-/*
- * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using intel_uncore_write.
- */
-#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
-#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
-#define D_COMP_COMP_FORCE (1 << 8)
-#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
#define _WM_LINETIME_A 0x45270
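
The i915_reg.h hunks above replace open-coded watermark shift/mask arithmetic (WM1S_LP_EN, WM1_LP_*_SHIFT, WM0_PIPE_*_SHIFT) with REG_BIT()/REG_GENMASK()/REG_FIELD_PREP()-style definitions such as WM_LP_SPRITE_ENABLE and WM_LP_SPRITE(). A standalone C sketch of the equivalence, using illustrative stand-ins for the kernel macros (the real ones do the same math plus compile-time checks) and a made-up sprite watermark value:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() */
#define BIT32(n)            (1u << (n))
#define GENMASK32(h, l)     (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP32(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define WM_LP_SPRITE_ENABLE BIT32(31)           /* ilk/snb WM1S only */
#define WM_LP_SPRITE_MASK   GENMASK32(10, 0)
#define WM_LP_SPRITE(x)     FIELD_PREP32(WM_LP_SPRITE_MASK, (x))

int main(void)
{
	uint32_t spr_val = 42;                      /* made-up r->spr_val */
	uint32_t old_style = (1u << 31) | spr_val;  /* WM1S_LP_EN | r->spr_val */
	uint32_t new_style = WM_LP_SPRITE_ENABLE | WM_LP_SPRITE(spr_val);

	printf("old=%#x new=%#x\n", old_style, new_style); /* both 0x8000002a */
	return 0;
}
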
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 34d37bbf08cd..d78d78fce431 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -37,6 +37,21 @@
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
+/**
+ * REG_GENMASK64() - Prepare a continuous u64 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK64(__high, __low) \
+ ((u64)(GENMASK_ULL(__high, __low) + \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
+ __is_constexpr(__low) && \
+ ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+
/*
* Local integer constant expression version of is_power_of_2().
*/
@@ -71,6 +86,18 @@
*/
#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+/**
+ * REG_FIELD_GET64() - Extract a u64 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u64 and for consistency with
+ * REG_GENMASK64().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
+
typedef struct {
u32 reg;
} i915_reg_t;
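
REG_GENMASK64() and REG_FIELD_GET64() added here are consumed a few hunks below, where intel_pm.c decodes the 64-bit HSW/BDW MCH_SSKPD value. A standalone sketch of that decode, with illustrative stand-ins for the new macros and a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Illustrative 64-bit equivalents of REG_GENMASK64()/REG_FIELD_GET64();
 * the kernel versions wrap GENMASK_ULL()/FIELD_GET() with u64 casts and
 * compile-time checks. */
#define GENMASK64(h, l)    (((~0ull) >> (63 - (h))) & ~((1ull << (l)) - 1ull))
#define FIELD_GET64(m, v)  (((uint64_t)(v) & (m)) >> __builtin_ctzll(m))

/* HSW/BDW MCH_SSKPD latency fields, as defined in intel_mchbar_regs.h below */
#define SSKPD_NEW_WM0_MASK_HSW  GENMASK64(63, 56)
#define SSKPD_WM1_MASK_HSW      GENMASK64(11, 4)

int main(void)
{
	uint64_t sskpd = 0x0a00000000000050ull;     /* made-up register value */

	printf("WM0 = %llu, WM1 = %llu\n",
	       (unsigned long long)FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd),
	       (unsigned long long)FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd));
	/* prints "WM0 = 10, WM1 = 5" */
	return 0;
}
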
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5d94f86940f7..505276373cc2 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -44,6 +44,7 @@
#include "i915_active.h"
#include "i915_deps.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 723bd0411a0e..174c95c3e10f 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
struct dram_dimm_info {
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
new file mode 100644
index 000000000000..2aad2f0cc8db
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MCHBAR_REGS__
+#define __INTEL_MCHBAR_REGS__
+
+#include "i915_reg_defs.h"
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way. It is not accessible from the CP register read instructions.
+ *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
+ */
+
+#define MCHBAR_MIRROR_BASE 0x10000
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
+#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
+#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
+#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
+#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
+
+/* Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
+/* 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
+#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
+
+/* 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206)
+#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606)
+
+/* Clocking configuration register */
+#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
+#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+#define CLKCFG_MEM_533 (1 << 4)
+#define CLKCFG_MEM_667 (2 << 4)
+#define CLKCFG_MEM_800 (3 << 4)
+#define CLKCFG_MEM_MASK (7 << 4)
+
+#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
+#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
+
+#define TSC1 _MMIO(MCHBAR_MIRROR_BASE + 0x1001)
+#define TSE (1 << 0)
+#define TR1 _MMIO(MCHBAR_MIRROR_BASE + 0x1006)
+#define TSFS _MMIO(MCHBAR_MIRROR_BASE + 0x1020)
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
+/* Memory latency timer register */
+#define MLTR_ILK _MMIO(MCHBAR_MIRROR_BASE + 0x1222)
+/* the unit of memory self-refresh latency time is 0.5us */
+#define MLTR_WM2_MASK REG_GENMASK(13, 8)
+#define MLTR_WM1_MASK REG_GENMASK(5, 0)
+
+#define CSIPLL0 _MMIO(MCHBAR_MIRROR_BASE + 0x2c10)
+#define DDRMPLL1 _MMIO(MCHBAR_MIRROR_BASE + 0x2c20)
+
+#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
+#define ILK_GRDOM_FULL (0 << 1)
+#define ILK_GRDOM_RENDER (1 << 1)
+#define ILK_GRDOM_MEDIA (3 << 1)
+#define ILK_GRDOM_MASK (3 << 1)
+#define ILK_GRDOM_RESET_ENABLE (1 << 0)
+
+#define BXT_D_CR_DRP0_DUNIT8 0x1000
+#define BXT_D_CR_DRP0_DUNIT9 0x1200
+#define BXT_D_CR_DRP0_DUNIT_START 8
+#define BXT_D_CR_DRP0_DUNIT_END 11
+#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \
+ _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
+ BXT_D_CR_DRP0_DUNIT9))
+#define BXT_DRAM_RANK_MASK 0x3
+#define BXT_DRAM_RANK_SINGLE 0x1
+#define BXT_DRAM_RANK_DUAL 0x3
+#define BXT_DRAM_WIDTH_MASK (0x3 << 4)
+#define BXT_DRAM_WIDTH_SHIFT 4
+#define BXT_DRAM_WIDTH_X8 (0x0 << 4)
+#define BXT_DRAM_WIDTH_X16 (0x1 << 4)
+#define BXT_DRAM_WIDTH_X32 (0x2 << 4)
+#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
+#define BXT_DRAM_SIZE_MASK (0x7 << 6)
+#define BXT_DRAM_SIZE_SHIFT 6
+#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
+#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
+#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
+#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
+#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
+#define BXT_DRAM_TYPE_MASK (0x7 << 22)
+#define BXT_DRAM_TYPE_SHIFT 22
+#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
+#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
+#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
+#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
+
+#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+#define DG1_DRAM_T_RDPRE_MASK REG_GENMASK(16, 11)
+#define DG1_DRAM_T_RP_MASK REG_GENMASK(6, 0)
+#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004)
+#define DG1_DRAM_T_RCD_MASK REG_GENMASK(15, 9)
+#define DG1_DRAM_T_RAS_MASK REG_GENMASK(8, 1)
+
+#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
+#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
+
+/* snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256 MB. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+
+#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
+#define SKL_DRAM_S_SHIFT 16
+#define SKL_DRAM_SIZE_MASK 0x3F
+#define SKL_DRAM_WIDTH_MASK (0x3 << 8)
+#define SKL_DRAM_WIDTH_SHIFT 8
+#define SKL_DRAM_WIDTH_X8 (0x0 << 8)
+#define SKL_DRAM_WIDTH_X16 (0x1 << 8)
+#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define SKL_DRAM_RANK_SHIFT 10
+#define SKL_DRAM_RANK_1 (0x0 << 10)
+#define SKL_DRAM_RANK_2 (0x1 << 10)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define ICL_DRAM_SIZE_MASK 0x7F
+#define ICL_DRAM_WIDTH_MASK (0x3 << 7)
+#define ICL_DRAM_WIDTH_SHIFT 7
+#define ICL_DRAM_WIDTH_X8 (0x0 << 7)
+#define ICL_DRAM_WIDTH_X16 (0x1 << 7)
+#define ICL_DRAM_WIDTH_X32 (0x2 << 7)
+#define ICL_DRAM_RANK_MASK (0x3 << 9)
+#define ICL_DRAM_RANK_SHIFT 9
+#define ICL_DRAM_RANK_1 (0x0 << 9)
+#define ICL_DRAM_RANK_2 (0x1 << 9)
+#define ICL_DRAM_RANK_3 (0x2 << 9)
+#define ICL_DRAM_RANK_4 (0x3 << 9)
+
+#define SA_PERF_STATUS_0_0_0_MCHBAR_PC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918)
+#define DG1_QCLK_RATIO_MASK REG_GENMASK(9, 2)
+#define DG1_QCLK_REFERENCE REG_BIT(10)
+
+#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define RP0_CAP_MASK REG_GENMASK(7, 0)
+#define RP1_CAP_MASK REG_GENMASK(15, 8)
+#define RPN_CAP_MASK REG_GENMASK(23, 16)
+
+/* snb MCH registers for priority tuning */
+#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define SSKPD_NEW_WM0_MASK_HSW REG_GENMASK64(63, 56)
+#define SSKPD_WM4_MASK_HSW REG_GENMASK64(40, 32)
+#define SSKPD_WM3_MASK_HSW REG_GENMASK64(28, 20)
+#define SSKPD_WM2_MASK_HSW REG_GENMASK64(19, 12)
+#define SSKPD_WM1_MASK_HSW REG_GENMASK64(11, 4)
+#define SSKPD_OLD_WM0_MASK_HSW REG_GENMASK64(3, 0)
+#define SSKPD_WM3_MASK_SNB REG_GENMASK(29, 24)
+#define SSKPD_WM2_MASK_SNB REG_GENMASK(21, 16)
+#define SSKPD_WM1_MASK_SNB REG_GENMASK(13, 8)
+#define SSKPD_WM0_MASK_SNB REG_GENMASK(5, 0)
+
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+ too) */
+#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
+#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
+#define DG1_GEAR_TYPE REG_BIT(16)
+
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
+#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5f0c)
+#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
+#define D_COMP_COMP_FORCE (1 << 8)
+#define D_COMP_COMP_DISABLE (1 << 0)
+
+#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
+
+#endif /* __INTEL_MCHBAR_REGS__ */
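
The DRAM and watermark definitions moved into this new header are consumed by intel_dram.c and intel_pm.c, both of which gain an #include of it in this patch. As a usage sketch only, assuming the definitions above plus the kernel u32 type, decoding the memory-type field of SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN could look like this; the real decode lives in intel_dram.c and is not part of this diff:

/* Sketch: map the SKL DDR-type field to a name using the masks above. */
static const char *skl_dram_type_str(u32 val)
{
	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR4:   return "DDR4";
	case SKL_DRAM_DDR_TYPE_DDR3:   return "DDR3";
	case SKL_DRAM_DDR_TYPE_LPDDR3: return "LPDDR3";
	case SKL_DRAM_DDR_TYPE_LPDDR4: return "LPDDR4";
	default:                       return "unknown";
	}
}
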
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 23d4bb011fc8..113537343461 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -50,11 +50,16 @@
#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
+#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+struct drm_i915_clock_gating_funcs {
+ void (*init_clock_gating)(struct drm_i915_private *i915);
+};
+
/* Stores plane specific WM parameters */
struct skl_wm_params {
bool x_tiled, y_tiled;
@@ -2942,27 +2947,27 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
- wm[0] = (sskpd >> 56) & 0xFF;
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
if (wm[0] == 0)
- wm[0] = sskpd & 0xF;
- wm[1] = (sskpd >> 4) & 0xFF;
- wm[2] = (sskpd >> 12) & 0xFF;
- wm[3] = (sskpd >> 20) & 0x1FF;
- wm[4] = (sskpd >> 32) & 0x1FF;
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
} else if (DISPLAY_VER(dev_priv) >= 6) {
u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
- wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
- wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
- wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
- wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
} else if (DISPLAY_VER(dev_priv) >= 5) {
u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
- wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
- wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
} else {
MISSING_CASE(INTEL_DEVID(dev_priv));
}
@@ -3175,12 +3180,8 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
}
pipe_wm->pipe_enabled = crtc_state->hw.active;
- if (sprstate) {
- pipe_wm->sprites_enabled = sprstate->uapi.visible;
- pipe_wm->sprites_scaled = sprstate->uapi.visible &&
- (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
- drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
- }
+ pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
+ pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
usable_level = max_level;
@@ -3409,29 +3410,28 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
- (r->pri_val << WM1_LP_SR_SHIFT) |
- r->cur_val;
+ WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_PRIMARY(r->pri_val) |
+ WM_LP_CURSOR(r->cur_val);
if (r->enable)
- results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+ results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
if (DISPLAY_VER(dev_priv) >= 8)
- results->wm_lp[wm_lp - 1] |=
- r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
- results->wm_lp[wm_lp - 1] |=
- r->fbc_val << WM1_LP_FBC_SHIFT;
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
+
+ results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
/*
- * Always set WM1S_LP_EN when spr_val != 0, even if the
+ * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
- results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
- } else
- results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+ results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
+ }
}
/* LP0 register values */
@@ -3444,9 +3444,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
continue;
results->wm_pipe[pipe] =
- (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
- (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
- r->cur_val;
+ WM0_PIPE_PRIMARY(r->pri_val) |
+ WM0_PIPE_SPRITE(r->spr_val) |
+ WM0_PIPE_CURSOR(r->cur_val);
}
}
@@ -3538,24 +3538,24 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_values *previous = &dev_priv->wm.hw;
bool changed = false;
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
- previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
+ previous->wm_lp[2] &= ~WM_LP_ENABLE;
intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
- previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
+ previous->wm_lp[1] &= ~WM_LP_ENABLE;
intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
- previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
+ previous->wm_lp[0] &= ~WM_LP_ENABLE;
intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
/*
- * Don't touch WM1S_LP_EN here.
+ * Don't touch WM_LP_SPRITE_ENABLE here.
* Doing so could cause underruns.
*/
@@ -3802,8 +3802,9 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
- intel_disable_sagv(dev_priv);
+ if (DISPLAY_VER(dev_priv) < 11) {
+ if (!intel_can_enable_sagv(dev_priv, new_bw_state))
+ intel_disable_sagv(dev_priv);
return;
}
@@ -3853,8 +3854,9 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
- intel_enable_sagv(dev_priv);
+ if (DISPLAY_VER(dev_priv) < 11) {
+ if (intel_can_enable_sagv(dev_priv, new_bw_state))
+ intel_enable_sagv(dev_priv);
return;
}
@@ -4034,6 +4036,15 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return 0;
}
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+ u16 start, u16 end)
+{
+ entry->start = start;
+ entry->end = end;
+
+ return end;
+}
+
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->dbuf.size /
@@ -4172,8 +4183,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
int ret;
if (new_dbuf_state->weight[pipe] == 0) {
- new_dbuf_state->ddb[pipe].start = 0;
- new_dbuf_state->ddb[pipe].end = 0;
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
goto out;
}
@@ -4189,8 +4199,10 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
start = ddb_range_size * weight_start / weight_total;
end = ddb_range_size * weight_end / weight_total;
- new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
- new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+ ddb_slices.start - mbus_offset + start,
+ ddb_slices.start - mbus_offset + end);
+
out:
if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
@@ -4268,11 +4280,11 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
-static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
- struct skl_ddb_entry *entry, u32 reg)
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
- entry->start = REG_FIELD_GET(PLANE_BUF_START_MASK, reg);
- entry->end = REG_FIELD_GET(PLANE_BUF_END_MASK, reg);
+ skl_ddb_entry_init(entry,
+ REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+ REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
if (entry->end)
entry->end++;
}
@@ -4290,7 +4302,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb_y, val);
return;
}
@@ -4304,7 +4316,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 11) {
val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb_y, val);
} else {
val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
@@ -4313,8 +4325,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
swap(val, val2);
- skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
- skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
+ skl_ddb_entry_init_from_hw(ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb_uv, val2);
}
}
@@ -4342,55 +4354,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-/*
- * Determines the downscale amount of a plane for the purposes of watermark calculations.
- * The bspec defines downscale amount as:
- *
- * """
- * Horizontal down scale amount = maximum[1, Horizontal source size /
- * Horizontal destination size]
- * Vertical down scale amount = maximum[1, Vertical source size /
- * Vertical destination size]
- * Total down scale amount = Horizontal down scale amount *
- * Vertical down scale amount
- * """
- *
- * Return value is provided in 16.16 fixed point form to retain fractional part.
- * Caller should take care of dividing & rounding off the value.
- */
-static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 src_w, src_h, dst_w, dst_h;
- uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
- uint_fixed_16_16_t downscale_h, downscale_w;
-
- if (drm_WARN_ON(&dev_priv->drm,
- !intel_wm_plane_visible(crtc_state, plane_state)))
- return u32_to_fixed16(0);
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- *
- * n.b., src is 16.16 fixed point, dst is whole integer.
- */
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- fp_w_ratio = div_fixed16(src_w, dst_w);
- fp_h_ratio = div_fixed16(src_h, dst_h);
- downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
- downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
- return mul_fixed16(downscale_w, downscale_h);
-}
-
struct dbuf_slice_conf_entry {
u8 active_pipes;
u8 dbuf_mask[I915_MAX_PIPES];
@@ -4946,10 +4909,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- u32 data_rate;
- u32 width = 0, height = 0;
- uint_fixed_16_16_t down_scale_amount;
- u64 rate;
+ int width, height;
if (!plane_state->uapi.visible)
return 0;
@@ -4983,14 +4943,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height /= 2;
}
- data_rate = width * height;
-
- down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
-
- rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
-
- rate *= fb->format->cpp[color_plane];
- return rate;
+ return width * height * fb->format->cpp[color_plane];
}
static u64
@@ -5147,9 +5100,31 @@ static bool icl_need_wm1_wa(struct drm_i915_private *i915,
(IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
}
+struct skl_plane_ddb_iter {
+ u64 data_rate;
+ u16 total[I915_MAX_PLANES];
+ u16 uv_total[I915_MAX_PLANES];
+ u16 start, size;
+};
+
+static u16
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ const struct skl_wm_level *wm,
+ u64 data_rate)
+{
+ u16 extra;
+
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+
+ return wm->min_ddb_alloc + extra;
+}
+
static int
-skl_allocate_plane_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
@@ -5158,10 +5133,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
int num_active = hweight8(dbuf_state->active_pipes);
- u16 alloc_size, start = 0;
- u16 total[I915_MAX_PLANES] = {};
- u16 uv_total[I915_MAX_PLANES] = {};
- u64 total_data_rate;
+ struct skl_plane_ddb_iter iter = {};
enum plane_id plane_id;
u32 blocks;
int level;
@@ -5174,24 +5146,21 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
return 0;
if (DISPLAY_VER(dev_priv) >= 11)
- total_data_rate =
- icl_get_total_relative_data_rate(state, crtc);
+ iter.data_rate = icl_get_total_relative_data_rate(state, crtc);
else
- total_data_rate =
- skl_get_total_relative_data_rate(state, crtc);
+ iter.data_rate = skl_get_total_relative_data_rate(state, crtc);
- alloc_size = skl_ddb_entry_size(alloc);
- if (alloc_size == 0)
+ iter.size = skl_ddb_entry_size(alloc);
+ if (iter.size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
- alloc_size -= total[PLANE_CURSOR];
- crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
- alloc->end - total[PLANE_CURSOR];
- crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+ iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= iter.total[PLANE_CURSOR];
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
+ alloc->end - iter.total[PLANE_CURSOR], alloc->end);
- if (total_data_rate == 0)
+ if (iter.data_rate == 0)
return 0;
/*
@@ -5205,7 +5174,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+ if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
drm_WARN_ON(&dev_priv->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
@@ -5218,8 +5187,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
blocks += wm->uv_wm[level].min_ddb_alloc;
}
- if (blocks <= alloc_size) {
- alloc_size -= blocks;
+ if (blocks <= iter.size) {
+ iter.size -= blocks;
break;
}
}
@@ -5228,7 +5197,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm,
"Requested display configuration exceeds system DDB limitations");
drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
- blocks, alloc_size);
+ blocks, iter.size);
return -EINVAL;
}
@@ -5240,8 +5209,6 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- u64 rate;
- u16 extra;
if (plane_id == PLANE_CURSOR)
continue;
@@ -5250,32 +5217,24 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
* We've accounted for all active planes; remaining planes are
* all disabled.
*/
- if (total_data_rate == 0)
+ if (iter.data_rate == 0)
break;
- rate = crtc_state->plane_data_rate[plane_id];
- extra = min_t(u16, alloc_size,
- DIV64_U64_ROUND_UP(alloc_size * rate,
- total_data_rate));
- total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
- alloc_size -= extra;
- total_data_rate -= rate;
+ iter.total[plane_id] =
+ skl_allocate_plane_ddb(&iter, &wm->wm[level],
+ crtc_state->plane_data_rate[plane_id]);
- if (total_data_rate == 0)
+ if (iter.data_rate == 0)
break;
- rate = crtc_state->uv_plane_data_rate[plane_id];
- extra = min_t(u16, alloc_size,
- DIV64_U64_ROUND_UP(alloc_size * rate,
- total_data_rate));
- uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
- alloc_size -= extra;
- total_data_rate -= rate;
+ iter.uv_total[plane_id] =
+ skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
+ crtc_state->uv_plane_data_rate[plane_id]);
}
- drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
+ drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
/* Set the actual DDB start/end points for each plane */
- start = alloc->start;
+ iter.start = alloc->start;
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
@@ -5287,20 +5246,16 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
/* Gen11+ uses a separate plane for UV watermarks */
drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]);
+ DISPLAY_VER(dev_priv) >= 11 && iter.uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
- if (total[plane_id]) {
- plane_alloc->start = start;
- start += total[plane_id];
- plane_alloc->end = start;
- }
+ if (iter.total[plane_id])
+ iter.start = skl_ddb_entry_init(plane_alloc, iter.start,
+ iter.start + iter.total[plane_id]);
- if (uv_total[plane_id]) {
- uv_plane_alloc->start = start;
- start += uv_total[plane_id];
- uv_plane_alloc->end = start;
- }
+ if (iter.uv_total[plane_id])
+ iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start,
+ iter.start + iter.uv_total[plane_id]);
}
/*
@@ -5315,7 +5270,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.optimal.planes[plane_id];
skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
- total[plane_id], uv_total[plane_id]);
+ iter.total[plane_id],
+ iter.uv_total[plane_id]);
if (icl_need_wm1_wa(dev_priv, plane_id) &&
level == 1 && wm->wm[0].enable) {
@@ -5334,9 +5290,9 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- skl_check_wm_level(&wm->trans_wm, total[plane_id]);
- skl_check_wm_level(&wm->sagv.wm0, total[plane_id]);
- skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]);
+ skl_check_wm_level(&wm->trans_wm, iter.total[plane_id]);
+ skl_check_wm_level(&wm->sagv.wm0, iter.total[plane_id]);
+ skl_check_wm_level(&wm->sagv.trans_wm, iter.total[plane_id]);
}
return 0;
@@ -6226,7 +6182,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- ret = skl_allocate_plane_ddb(state, crtc);
+ ret = skl_crtc_allocate_plane_ddb(state, crtc);
if (ret)
return ret;
@@ -6803,9 +6759,9 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* multiple pipes are active.
*/
active->wm[0].enable = true;
- active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
- active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
- active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+ active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
+ active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
+ active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
} else {
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -7229,12 +7185,12 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
*/
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE);
/*
- * Don't touch WM1S_LP_EN here.
+ * Don't touch WM_LP_SPRITE_ENABLE here.
* Doing so could cause underruns.
*/
}
@@ -7437,7 +7393,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
u32 tmp;
tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
- if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+ if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
drm_dbg_kms(&dev_priv->drm,
"Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
tmp);
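
The skl_plane_ddb_iter refactor above keeps the same allocation math as before: after reserving each plane's minimum, the remaining DDB blocks are split in proportion to plane data rate (the kernel code also adds wm->min_ddb_alloc, walks planes in plane-ID order, and breaks out once data_rate reaches zero). A standalone sketch of just the proportional step, with made-up numbers:

#include <stdint.h>
#include <stdio.h>

struct ddb_iter {
	uint64_t data_rate;  /* relative data rate not yet allocated */
	uint16_t size;       /* DDB blocks not yet allocated */
};

/* Mirrors the proportional part of skl_allocate_plane_ddb(): each plane's
 * extra allocation is its share of the remaining blocks, rounded up and
 * clamped to what is left. */
static unsigned int alloc_extra(struct ddb_iter *iter, uint64_t rate)
{
	uint64_t extra = (iter->size * rate + iter->data_rate - 1) / iter->data_rate;

	if (extra > iter->size)
		extra = iter->size;
	iter->size -= (uint16_t)extra;
	iter->data_rate -= rate;
	return (unsigned int)extra;
}

int main(void)
{
	struct ddb_iter iter = { .data_rate = 300, .size = 120 };
	uint64_t rates[] = { 200, 100 };            /* two visible planes */

	for (int i = 0; i < 2; i++)
		printf("plane %d: %u extra blocks\n", i, alloc_extra(&iter, rates[i]));
	/* prints 80 and 40; iter.size and iter.data_rate both end at 0 */
	return 0;
}
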
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 3293ac71bcf8..6ed5786bcd29 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -77,6 +77,9 @@ track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
depot_stack_handle_t stack, *stacks;
unsigned long flags;
+ if (rpm->no_wakeref_tracking)
+ return -1;
+
stack = __save_depot_stack();
if (!stack)
return -1;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 47a85fab4130..d9160e3ff4af 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -51,6 +51,7 @@ struct intel_runtime_pm {
bool available;
bool suspended;
bool irqs_enabled;
+ bool no_wakeref_tracking;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 850ebfae31af..dd8fdd5863de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1496,7 +1496,7 @@ ilk_dummy_write(struct intel_uncore *uncore)
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
- __raw_uncore_write32(uncore, MI_MODE, 0);
+ __raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}
static void
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index b5576888cd78..6a7328d9c361 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -6,9 +6,10 @@
#include <linux/random.h>
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
-#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 75b709c26dd3..74a1b2ecf48f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -22,6 +22,7 @@
*
*/
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 575705c3bce9..1b508c89468c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -26,6 +26,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 92a859b34190..3e673fd1ea4f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -26,6 +26,7 @@
#include <linux/pm_qos.h>
#include <linux/sort.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 2d6d7bd13c3c..c4e932368b37 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -24,6 +24,7 @@
#include <linux/random.h>
#include "gt/intel_gt_pm.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 5c5809dfe9b2..cc6f9af679fb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -25,6 +25,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
#include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 24d87d0fc747..0c22594ae274 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -6,6 +6,7 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"
#include "igt_spinner.h"
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 8aa7b1d33865..79e819334b4e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -161,6 +161,8 @@ struct drm_i915_private *mock_gem_device(void)
i915_params_copy(&i915->params, &i915_modparams);
intel_runtime_pm_init_early(&i915->runtime_pm);
+ /* wakeref tracking has significant overhead */
+ i915->runtime_pm.no_wakeref_tracking = true;
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h
index 98d020835b49..69487bd8ed56 100644
--- a/include/drm/dp/drm_dp_helper.h
+++ b/include/drm/dp/drm_dp_helper.h
@@ -560,6 +560,7 @@ struct drm_panel;
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
# define DP_TRAINING_PATTERN_MASK 0x3
@@ -738,11 +739,13 @@ struct drm_panel;
DP_LANE_CHANNEL_EQ_DONE | \
DP_LANE_SYMBOL_LOCKED)
-#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
-
-#define DP_INTERLANE_ALIGN_DONE (1 << 0)
-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
-#define DP_LINK_STATUS_UPDATED (1 << 7)
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
+#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
+#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
@@ -1112,6 +1115,7 @@ struct drm_panel;
# define DP_UHBR13_5 (1 << 2)
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7)
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01
@@ -1347,6 +1351,7 @@ struct drm_panel;
# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
+#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */
enum drm_dp_phy {
DP_PHY_DPRX,
@@ -1549,6 +1554,15 @@ void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux);
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]);
+
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
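
The 128b/132b helpers declared above (presumably implemented in the drm_dp.c changes listed in this merge) are intended for DP 2.0 link-training loops. A hedged sketch of a channel-equalization poll using them; the retry count is arbitrary, LTTPR and timeout bookkeeping are omitted, and the assumption that the AUX read-interval helper returns microseconds is mine, not stated in this header:

/* Sketch only: poll DPCD link status until 128b/132b channel EQ completes. */
static int poll_128b132b_channel_eq(struct drm_dp_aux *aux, int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int delay_us = drm_dp_128b132b_read_aux_rd_interval(aux);
	int try;

	for (try = 0; try < 20; try++) {
		usleep_range(delay_us, 2 * delay_us); /* assumes a microsecond return value */

		if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
			return -EIO;

		if (drm_dp_128b132b_link_training_failed(link_status))
			return -EINVAL;

		if (drm_dp_128b132b_lane_channel_eq_done(link_status, lane_count) &&
		    drm_dp_128b132b_eq_interlane_align_done(link_status))
			return 0;
	}

	return -ETIMEDOUT;
}
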