Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 1457
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 189
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 313
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 210
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 67
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c | 184
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.h | 56
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 60
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 270
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 296
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 246
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 76
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 155
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 357
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 114
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 106
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_query.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c (renamed from drivers/gpu/drm/i915/i915_gem_request.c) | 426
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h (renamed from drivers/gpu/drm/i915/i915_gem_request.h) | 222
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 128
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 107
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 304
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 239
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 97
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 235
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 290
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 80
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 271
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 749
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 247
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 197
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 181
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.c | 166
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 477
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 85
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 185
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 226
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 125
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 164
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 94
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 180
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 28
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 14
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 12
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c | 24
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_request.c) | 125
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 119
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 31
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.h | 8
135 files changed, 7114 insertions, 4489 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3bddd8a06806..4eee91a3a236 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -17,6 +17,7 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
subdir-ccflags-y += $(call cc-disable-warning, type-limits)
subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
@@ -62,13 +63,14 @@ i915-y += i915_cmd_parser.o \
i915_gem.o \
i915_gem_object.o \
i915_gem_render_state.o \
- i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_gemfs.o \
+ i915_query.o \
+ i915_request.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
@@ -88,7 +90,8 @@ i915-y += intel_uc.o \
intel_guc_fw.o \
intel_guc_log.o \
intel_guc_submission.o \
- intel_huc.o
+ intel_huc.o \
+ intel_huc_fw.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 4950b82f5b49..c73aff163908 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -59,28 +59,28 @@
* This must not be set while VR01_DVO_BYPASS_ENABLE is set.
*/
# define VR01_LCD_ENABLE (1 << 2)
-/** Enables the DVO repeater. */
+/* Enables the DVO repeater. */
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
-/** Enables the DVO clock */
+/* Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
-/** Enable dithering for 18bpp panels. Not documented. */
+/* Enable dithering for 18bpp panels. Not documented. */
# define VR01_DITHER_ENABLE (1 << 4)
/*
* LCD Interface Format
*/
#define VR10 0x10
-/** Enables LVDS output instead of CMOS */
+/* Enables LVDS output instead of CMOS */
# define VR10_LVDS_ENABLE (1 << 4)
-/** Enables 18-bit LVDS output. */
+/* Enables 18-bit LVDS output. */
# define VR10_INTERFACE_1X18 (0 << 2)
-/** Enables 24-bit LVDS or CMOS output */
+/* Enables 24-bit LVDS or CMOS output */
# define VR10_INTERFACE_1X24 (1 << 2)
-/** Enables 2x18-bit LVDS or CMOS output. */
+/* Enables 2x18-bit LVDS or CMOS output. */
# define VR10_INTERFACE_2X18 (2 << 2)
-/** Enables 2x24-bit LVDS output */
+/* Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
-/** Mask that defines the depth of the pipeline */
+/* Mask that defines the depth of the pipeline */
# define VR10_INTERFACE_DEPTH_MASK (3 << 2)
/*
@@ -97,7 +97,7 @@
* Panel power down status
*/
#define VR30 0x30
-/** Read only bit indicating that the panel is not in a safe poweroff state. */
+/* Read only bit indicating that the panel is not in a safe poweroff state. */
# define VR30_PANEL_ON (1 << 15)
#define VR40 0x40
@@ -183,7 +183,7 @@ struct ivch_priv {
static void ivch_dump_regs(struct intel_dvo_device *dvo);
-/**
+/*
* Reads a register on the ivch.
*
* Each of the 256 registers are 16 bits long.
@@ -230,7 +230,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
return false;
}
-/** Writes a 16-bit register on the ivch */
+/* Writes a 16-bit register on the ivch */
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -258,7 +258,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
return false;
}
-/** Probes the given bus and slave address for an ivch */
+/* Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
@@ -338,7 +338,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
}
-/** Sets the power state of the panel connected to the ivch */
+/* Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 347116faa558..b016dc753db9 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -3,7 +3,7 @@ GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
- fb_decoder.o dmabuf.o
+ fb_decoder.o dmabuf.o page_track.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2fb7b34ef561..b555eb26f9ce 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -162,8 +162,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
info->size << PAGE_SHIFT);
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
@@ -459,7 +459,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
obj = vgpu_create_gem(dev, dmabuf_obj->info);
if (obj == NULL) {
- gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+ gvt_vgpu_err("create gvt gem obj failed\n");
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8d5317d0122d..0a100a288e6d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -38,6 +38,12 @@
#include "i915_pvinfo.h"
#include "trace.h"
+#if defined(VERBOSE_DEBUG)
+#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
+#else
+#define gvt_vdbg_mm(fmt, args...)
+#endif
+
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
@@ -264,7 +270,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
return readq(addr);
}
-static void gtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
mmio_hw_access_pre(dev_priv);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -331,20 +337,20 @@ static inline int gtt_set_entry64(void *pt,
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
+#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
+#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
- pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+ pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
- pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+ pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
else
- pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+ pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
return pfn;
}
@@ -352,16 +358,16 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
e->val64 &= ~ADDR_1G_MASK;
- pfn &= (ADDR_1G_MASK >> 12);
+ pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
- pfn &= (ADDR_2M_MASK >> 12);
+ pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
} else {
e->val64 &= ~ADDR_4K_MASK;
- pfn &= (ADDR_4K_MASK >> 12);
+ pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
}
- e->val64 |= (pfn << 12);
+ e->val64 |= (pfn << PAGE_SHIFT);
}
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
@@ -371,7 +377,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
return false;
e->type = get_entry_type(e->type);
- if (!(e->val64 & BIT(7)))
+ if (!(e->val64 & _PAGE_PSE))
return false;
e->type = get_pse_type(e->type);
@@ -389,17 +395,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
- return (e->val64 & BIT(0));
+ return (e->val64 & _PAGE_PRESENT);
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 &= ~BIT(0);
+ e->val64 &= ~_PAGE_PRESENT;
}
static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 |= BIT(0);
+ e->val64 |= _PAGE_PRESENT;
}
/*
@@ -447,58 +453,91 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
-static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
- struct intel_gvt_gtt_entry *m)
+/*
+ * MM helpers.
+ */
+static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index,
+ bool guest)
{
- struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- unsigned long gfn, mfn;
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
- *m = *p;
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
- if (!ops->test_present(p))
- return 0;
+ entry->type = mm->ppgtt_mm.root_entry_type;
+ pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
+ mm->ppgtt_mm.shadow_pdps,
+ entry, index, false, 0, mm->vgpu);
- gfn = ops->get_pfn(p);
+ pte_ops->test_pse(entry);
+}
- mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
- return -ENXIO;
- }
+static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_get_root_entry(mm, entry, index, true);
+}
- ops->set_pfn(m, mfn);
- return 0;
+static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_get_root_entry(mm, entry, index, false);
}
-/*
- * MM helpers.
- */
-int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index)
+static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index,
+ bool guest)
{
- struct intel_gvt *gvt = mm->vgpu->gvt;
- struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
- int ret;
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
- e->type = mm->page_table_entry_type;
+ pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
+ mm->ppgtt_mm.shadow_pdps,
+ entry, index, false, 0, mm->vgpu);
+}
- ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
- if (ret)
- return ret;
+static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_set_root_entry(mm, entry, index, true);
+}
- ops->test_pse(e);
- return 0;
+static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_set_root_entry(mm, entry, index, false);
}
-int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index)
+static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
{
- struct intel_gvt *gvt = mm->vgpu->gvt;
- struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ entry->type = GTT_TYPE_GGTT_PTE;
+ pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+ false, 0, mm->vgpu);
+}
+
+static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+ false, 0, mm->vgpu);
+}
+
+static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
- return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+ pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
/*
@@ -520,12 +559,15 @@ static inline int ppgtt_spt_get_entry(
return -EINVAL;
ret = ops->get_entry(page_table, e, index, guest,
- spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
+ spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
if (ret)
return ret;
ops->test_pse(e);
+
+ gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
+ type, e->type, index, e->val64);
return 0;
}
@@ -541,18 +583,21 @@ static inline int ppgtt_spt_set_entry(
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
return -EINVAL;
+ gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
+ type, e->type, index, e->val64);
+
return ops->set_entry(page_table, e, index, guest,
- spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
+ spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
}
#define ppgtt_get_guest_entry(spt, e, index) \
ppgtt_spt_get_entry(spt, NULL, \
- spt->guest_page_type, e, index, true)
+ spt->guest_page.type, e, index, true)
#define ppgtt_set_guest_entry(spt, e, index) \
ppgtt_spt_set_entry(spt, NULL, \
- spt->guest_page_type, e, index, true)
+ spt->guest_page.type, e, index, true)
#define ppgtt_get_shadow_entry(spt, e, index) \
ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
@@ -562,159 +607,6 @@ static inline int ppgtt_spt_set_entry(
ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
spt->shadow_page.type, e, index, false)
-/**
- * intel_vgpu_init_page_track - init a page track data structure
- * @vgpu: a vGPU
- * @t: a page track data structure
- * @gfn: guest memory page frame number
- * @handler: the function will be called when target guest memory page has
- * been modified.
- *
- * This function is called when a user wants to prepare a page track data
- * structure to track a guest memory page.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
- struct intel_vgpu_page_track *t,
- unsigned long gfn,
- int (*handler)(void *, u64, void *, int),
- void *data)
-{
- INIT_HLIST_NODE(&t->node);
-
- t->tracked = false;
- t->gfn = gfn;
- t->handler = handler;
- t->data = data;
-
- hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
- return 0;
-}
-
-/**
- * intel_vgpu_clean_page_track - release a page track data structure
- * @vgpu: a vGPU
- * @t: a page track data structure
- *
- * This function is called before a user frees a page track data structure.
- */
-void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
- struct intel_vgpu_page_track *t)
-{
- if (!hlist_unhashed(&t->node))
- hash_del(&t->node);
-
- if (t->tracked)
- intel_gvt_hypervisor_disable_page_track(vgpu, t);
-}
-
-/**
- * intel_vgpu_find_tracked_page - find a tracked guest page
- * @vgpu: a vGPU
- * @gfn: guest memory page frame number
- *
- * This function is called when the emulation layer wants to figure out if a
- * trapped GFN is a tracked guest page.
- *
- * Returns:
- * Pointer to page track data structure, NULL if not found.
- */
-struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- struct intel_vgpu_page_track *t;
-
- hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
- t, node, gfn) {
- if (t->gfn == gfn)
- return t;
- }
- return NULL;
-}
-
-static int init_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p,
- unsigned long gfn,
- int (*handler)(void *, u64, void *, int),
- void *data)
-{
- p->oos_page = NULL;
- p->write_cnt = 0;
-
- return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
-}
-
-static int detach_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_oos_page *oos_page);
-
-static void clean_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
-{
- if (p->oos_page)
- detach_oos_page(vgpu, p->oos_page);
-
- intel_vgpu_clean_page_track(vgpu, &p->track);
-}
-
-static inline int init_shadow_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_shadow_page *p, int type, bool hash)
-{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr;
-
- daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(kdev, daddr)) {
- gvt_vgpu_err("fail to map dma addr\n");
- return -EINVAL;
- }
-
- p->vaddr = page_address(p->page);
- p->type = type;
-
- INIT_HLIST_NODE(&p->node);
-
- p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
- if (hash)
- hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
- return 0;
-}
-
-static inline void clean_shadow_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_shadow_page *p)
-{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-
- dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (!hlist_unhashed(&p->node))
- hash_del(&p->node);
-}
-
-static inline struct intel_vgpu_shadow_page *find_shadow_page(
- struct intel_vgpu *vgpu, unsigned long mfn)
-{
- struct intel_vgpu_shadow_page *p;
-
- hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
- p, node, mfn) {
- if (p->mfn == mfn)
- return p;
- }
- return NULL;
-}
-
-#define page_track_to_guest_page(ptr) \
- container_of(ptr, struct intel_vgpu_guest_page, track)
-
-#define guest_page_to_ppgtt_spt(ptr) \
- container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
-
-#define shadow_page_to_ppgtt_spt(ptr) \
- container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
-
static void *alloc_spt(gfp_t gfp_mask)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -737,63 +629,96 @@ static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
kfree(spt);
}
-static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+ struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
- clean_shadow_page(spt->vgpu, &spt->shadow_page);
- clean_guest_page(spt->vgpu, &spt->guest_page);
- list_del_init(&spt->post_shadow_list);
+ trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
+
+ dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+ radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
+
+ if (spt->guest_page.oos_page)
+ detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+
+ intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+
+ list_del_init(&spt->post_shadow_list);
free_spt(spt);
}
-static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
- struct hlist_node *n;
- struct intel_vgpu_shadow_page *sp;
- int i;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct radix_tree_iter iter;
+ void **slot;
- hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
- ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+ radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
+ spt = radix_tree_deref_slot(slot);
+ ppgtt_free_spt(spt);
+ }
}
static int ppgtt_handle_guest_write_page_table_bytes(
- struct intel_vgpu_guest_page *gpt,
+ struct intel_vgpu_ppgtt_spt *spt,
u64 pa, void *p_data, int bytes);
-static int ppgtt_write_protection_handler(void *data, u64 pa,
- void *p_data, int bytes)
+static int ppgtt_write_protection_handler(
+ struct intel_vgpu_page_track *page_track,
+ u64 gpa, void *data, int bytes)
{
- struct intel_vgpu_page_track *t = data;
- struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
+ struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
+
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
- if (!t->tracked)
- return -EINVAL;
-
- ret = ppgtt_handle_guest_write_page_table_bytes(p,
- pa, p_data, bytes);
+ ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
if (ret)
return ret;
return ret;
}
-static int reclaim_one_mm(struct intel_gvt *gvt);
+/* Find a spt by guest gfn. */
+static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (track && track->handler == ppgtt_write_protection_handler)
+ return track->priv_data;
-static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+ return NULL;
+}
+
+/* Find the spt by shadow page mfn. */
+static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
+}
+
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
+ struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
+ dma_addr_t daddr;
int ret;
retry:
spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
if (!spt) {
- if (reclaim_one_mm(vgpu->gvt))
+ if (reclaim_one_ppgtt_mm(vgpu->gvt))
goto retry;
gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
@@ -801,44 +726,48 @@ retry:
}
spt->vgpu = vgpu;
- spt->guest_page_type = type;
atomic_set(&spt->refcount, 1);
INIT_LIST_HEAD(&spt->post_shadow_list);
/*
- * TODO: guest page type may be different with shadow page type,
- * when we support PSE page in future.
+ * Init shadow_page.
*/
- ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
- if (ret) {
- gvt_vgpu_err("fail to initialize shadow page for spt\n");
- goto err;
+ spt->shadow_page.type = type;
+ daddr = dma_map_page(kdev, spt->shadow_page.page,
+ 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev, daddr)) {
+ gvt_vgpu_err("fail to map dma addr\n");
+ ret = -EINVAL;
+ goto err_free_spt;
}
+ spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
+ spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- ret = init_guest_page(vgpu, &spt->guest_page,
- gfn, ppgtt_write_protection_handler, NULL);
- if (ret) {
- gvt_vgpu_err("fail to initialize guest page for spt\n");
- goto err;
- }
+ /*
+ * Init guest_page.
+ */
+ spt->guest_page.type = type;
+ spt->guest_page.gfn = gfn;
- trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
- return spt;
-err:
- ppgtt_free_shadow_page(spt);
- return ERR_PTR(ret);
-}
+ ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
+ ppgtt_write_protection_handler, spt);
+ if (ret)
+ goto err_unmap_dma;
-static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
- struct intel_vgpu *vgpu, unsigned long mfn)
-{
- struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+ ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
+ if (ret)
+ goto err_unreg_page_track;
- if (p)
- return shadow_page_to_ppgtt_spt(p);
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+ return spt;
- gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
- return NULL;
+err_unreg_page_track:
+ intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
+err_unmap_dma:
+ dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+err_free_spt:
+ free_spt(spt);
+ return ERR_PTR(ret);
}
#define pt_entry_size_shift(spt) \
@@ -857,7 +786,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
if (!ppgtt_get_shadow_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
-static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
@@ -866,17 +795,16 @@ static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
atomic_inc(&spt->refcount);
}
-static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
-static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
intel_gvt_gtt_type_t cur_pt_type;
- if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
- return -EINVAL;
+ GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -885,16 +813,33 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
}
- s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
if (!s) {
gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
ops->get_pfn(e));
return -ENXIO;
}
- return ppgtt_invalidate_shadow_page(s);
+ return ppgtt_invalidate_spt(s);
}
-static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+ int type;
+
+ pfn = ops->get_pfn(entry);
+ type = spt->shadow_page.type;
+
+ if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+ return;
+
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+}
+
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry e;
@@ -903,30 +848,40 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
- spt->guest_page.track.gfn, spt->shadow_page.type);
+ spt->guest_page.gfn, spt->shadow_page.type);
trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
if (atomic_dec_return(&spt->refcount) > 0)
return 0;
- if (gtt_type_is_pte_pt(spt->shadow_page.type))
- goto release;
-
for_each_present_shadow_entry(spt, &e, index) {
- if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
- gvt_vgpu_err("GVT doesn't support pse bit for now\n");
- return -EINVAL;
+ switch (e.type) {
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(spt, &e);
+ break;
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ WARN(1, "GVT doesn't support 2M/1GB page\n");
+ continue;
+ case GTT_TYPE_PPGTT_PML4_ENTRY:
+ case GTT_TYPE_PPGTT_PDP_ENTRY:
+ case GTT_TYPE_PPGTT_PDE_ENTRY:
+ gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
+ ret = ppgtt_invalidate_spt_by_shadow_entry(
+ spt->vgpu, &e);
+ if (ret)
+ goto fail;
+ break;
+ default:
+ GEM_BUG_ON(1);
}
- ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
- spt->vgpu, &e);
- if (ret)
- goto fail;
}
-release:
+
trace_spt_change(spt->vgpu->id, "release", spt,
- spt->guest_page.track.gfn, spt->shadow_page.type);
- ppgtt_free_shadow_page(spt);
+ spt->guest_page.gfn, spt->shadow_page.type);
+ ppgtt_free_spt(spt);
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
@@ -934,52 +889,44 @@ fail:
return ret;
}
-static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
-static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_vgpu_ppgtt_spt *s = NULL;
- struct intel_vgpu_guest_page *g;
- struct intel_vgpu_page_track *t;
+ struct intel_vgpu_ppgtt_spt *spt = NULL;
int ret;
- if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
- ret = -EINVAL;
- goto fail;
- }
+ GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
- t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
- if (t) {
- g = page_track_to_guest_page(t);
- s = guest_page_to_ppgtt_spt(g);
- ppgtt_get_shadow_page(s);
- } else {
+ spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
+ if (spt)
+ ppgtt_get_spt(spt);
+ else {
int type = get_next_pt_type(we->type);
- s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
- if (IS_ERR(s)) {
- ret = PTR_ERR(s);
+ spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+ if (IS_ERR(spt)) {
+ ret = PTR_ERR(spt);
goto fail;
}
- ret = intel_gvt_hypervisor_enable_page_track(vgpu,
- &s->guest_page.track);
+ ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
if (ret)
goto fail;
- ret = ppgtt_populate_shadow_page(s);
+ ret = ppgtt_populate_spt(spt);
if (ret)
goto fail;
- trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
- s->shadow_page.type);
+ trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
+ spt->shadow_page.type);
}
- return s;
+ return spt;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
- s, we->val64, we->type);
+ spt, we->val64, we->type);
return ERR_PTR(ret);
}
@@ -994,7 +941,44 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *ge)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry se = *ge;
+ unsigned long gfn;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!pte_ops->test_present(ge))
+ return 0;
+
+ gfn = pte_ops->get_pfn(ge);
+
+ switch (ge->type) {
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ gvt_vdbg_mm("shadow 4K gtt entry\n");
+ break;
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+ return -EINVAL;
+ default:
+ GEM_BUG_ON(1);
+ };
+
+ /* direct shadow */
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+ if (ret)
+ return -ENXIO;
+
+ pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(spt, &se, index);
+ return 0;
+}
+
+static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
@@ -1005,34 +989,30 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
- spt->guest_page.track.gfn, spt->shadow_page.type);
+ spt->guest_page.gfn, spt->shadow_page.type);
- if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
- for_each_present_guest_entry(spt, &ge, i) {
+ for_each_present_guest_entry(spt, &ge, i) {
+ if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
+ s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &se, i);
+ ppgtt_generate_shadow_entry(&se, s, &ge);
+ ppgtt_set_shadow_entry(spt, &se, i);
+ } else {
gfn = ops->get_pfn(&ge);
- if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
- gtt_entry_p2m(vgpu, &ge, &se))
+ if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
- ppgtt_set_shadow_entry(spt, &se, i);
- }
- return 0;
- }
-
- for_each_present_guest_entry(spt, &ge, i) {
- if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
- gvt_vgpu_err("GVT doesn't support pse bit now\n");
- ret = -EINVAL;
- goto fail;
- }
+ ppgtt_set_shadow_entry(spt, &se, i);
+ continue;
+ }
- s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
- if (IS_ERR(s)) {
- ret = PTR_ERR(s);
- goto fail;
+ ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
+ if (ret)
+ goto fail;
}
- ppgtt_get_shadow_entry(spt, &se, i);
- ppgtt_generate_shadow_entry(&se, s, &ge);
- ppgtt_set_shadow_entry(spt, &se, i);
}
return 0;
fail:
@@ -1041,36 +1021,40 @@ fail:
return ret;
}
-static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
struct intel_gvt_gtt_entry *se, unsigned long index)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
- struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int ret;
- trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
- index);
+ trace_spt_guest_change(spt->vgpu->id, "remove", spt,
+ spt->shadow_page.type, se->val64, index);
+
+ gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
+ se->type, index, se->val64);
if (!ops->test_present(se))
return 0;
- if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ if (ops->get_pfn(se) ==
+ vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
return 0;
if (gtt_type_is_pt(get_next_pt_type(se->type))) {
struct intel_vgpu_ppgtt_spt *s =
- ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
+ intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
if (!s) {
gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
- ret = ppgtt_invalidate_shadow_page(s);
+ ret = ppgtt_invalidate_spt(s);
if (ret)
goto fail;
- }
+ } else
+ ppgtt_invalidate_pte(spt, se);
+
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
@@ -1078,21 +1062,22 @@ fail:
return ret;
}
-static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
- struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry m;
struct intel_vgpu_ppgtt_spt *s;
int ret;
- trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
- we->val64, index);
+ trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
+ we->val64, index);
+
+ gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
+ we->type, index, we->val64);
if (gtt_type_is_pt(get_next_pt_type(we->type))) {
- s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+ s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto fail;
@@ -1101,10 +1086,9 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
ppgtt_generate_shadow_entry(&m, s, we);
ppgtt_set_shadow_entry(spt, &m, index);
} else {
- ret = gtt_entry_p2m(vgpu, we, &m);
+ ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
if (ret)
goto fail;
- ppgtt_set_shadow_entry(spt, &m, index);
}
return 0;
fail:
@@ -1119,41 +1103,39 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
- struct intel_vgpu_ppgtt_spt *spt =
- guest_page_to_ppgtt_spt(oos_page->guest_page);
- struct intel_gvt_gtt_entry old, new, m;
+ struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
+ struct intel_gvt_gtt_entry old, new;
int index;
int ret;
trace_oos_change(vgpu->id, "sync", oos_page->id,
- oos_page->guest_page, spt->guest_page_type);
+ spt, spt->guest_page.type);
- old.type = new.type = get_entry_type(spt->guest_page_type);
+ old.type = new.type = get_entry_type(spt->guest_page.type);
old.val64 = new.val64 = 0;
for (index = 0; index < (I915_GTT_PAGE_SIZE >>
info->gtt_entry_size_shift); index++) {
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
ops->get_entry(NULL, &new, index, true,
- oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
+ spt->guest_page.gfn << PAGE_SHIFT, vgpu);
if (old.val64 == new.val64
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
continue;
trace_oos_sync(vgpu->id, oos_page->id,
- oos_page->guest_page, spt->guest_page_type,
+ spt, spt->guest_page.type,
new.val64, index);
- ret = gtt_entry_p2m(vgpu, &new, &m);
+ ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
if (ret)
return ret;
ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
- ppgtt_set_shadow_entry(spt, &m, index);
}
- oos_page->guest_page->write_cnt = 0;
+ spt->guest_page.write_cnt = 0;
list_del_init(&spt->post_shadow_list);
return 0;
}
@@ -1162,15 +1144,14 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_ppgtt_spt *spt =
- guest_page_to_ppgtt_spt(oos_page->guest_page);
+ struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
trace_oos_change(vgpu->id, "detach", oos_page->id,
- oos_page->guest_page, spt->guest_page_type);
+ spt, spt->guest_page.type);
- oos_page->guest_page->write_cnt = 0;
- oos_page->guest_page->oos_page = NULL;
- oos_page->guest_page = NULL;
+ spt->guest_page.write_cnt = 0;
+ spt->guest_page.oos_page = NULL;
+ oos_page->spt = NULL;
list_del_init(&oos_page->vm_list);
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
@@ -1178,51 +1159,49 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
return 0;
}
-static int attach_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_oos_page *oos_page,
- struct intel_vgpu_guest_page *gpt)
+static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
+ struct intel_vgpu_ppgtt_spt *spt)
{
- struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt *gvt = spt->vgpu->gvt;
int ret;
- ret = intel_gvt_hypervisor_read_gpa(vgpu,
- gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+ ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+ spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
return ret;
- oos_page->guest_page = gpt;
- gpt->oos_page = oos_page;
+ oos_page->spt = spt;
+ spt->guest_page.oos_page = oos_page;
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
- trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
- gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
+ spt, spt->guest_page.type);
return 0;
}
-static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *gpt)
+static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
+ struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
int ret;
- ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
+ ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
if (ret)
return ret;
- trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
- gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
+ spt, spt->guest_page.type);
- list_del_init(&gpt->oos_page->vm_list);
- return sync_oos_page(vgpu, gpt->oos_page);
+ list_del_init(&oos_page->vm_list);
+ return sync_oos_page(spt->vgpu, oos_page);
}
-static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *gpt)
+static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
- struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt *gvt = spt->vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
- struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
int ret;
WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
@@ -1230,31 +1209,30 @@ static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
if (list_empty(&gtt->oos_page_free_list_head)) {
oos_page = container_of(gtt->oos_page_use_list_head.next,
struct intel_vgpu_oos_page, list);
- ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ ret = ppgtt_set_guest_page_sync(oos_page->spt);
if (ret)
return ret;
- ret = detach_oos_page(vgpu, oos_page);
+ ret = detach_oos_page(spt->vgpu, oos_page);
if (ret)
return ret;
} else
oos_page = container_of(gtt->oos_page_free_list_head.next,
struct intel_vgpu_oos_page, list);
- return attach_oos_page(vgpu, oos_page, gpt);
+ return attach_oos_page(oos_page, spt);
}
-static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *gpt)
+static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
- struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
return -EINVAL;
- trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
- gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
+ spt, spt->guest_page.type);
- list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
- return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
+ list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
+ return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}
/**
@@ -1279,7 +1257,7 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
oos_page = container_of(pos,
struct intel_vgpu_oos_page, vm_list);
- ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ ret = ppgtt_set_guest_page_sync(oos_page->spt);
if (ret)
return ret;
}
@@ -1290,17 +1268,15 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
* The heart of PPGTT shadow page table.
*/
static int ppgtt_handle_guest_write_page_table(
- struct intel_vgpu_guest_page *gpt,
+ struct intel_vgpu_ppgtt_spt *spt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
int type = spt->shadow_page.type;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_gvt_gtt_entry se;
-
- int ret;
+ struct intel_gvt_gtt_entry old_se;
int new_present;
+ int ret;
new_present = ops->test_present(we);
@@ -1309,21 +1285,21 @@ static int ppgtt_handle_guest_write_page_table(
* guarantee the ppgtt table is validated during the window between
* adding and removal.
*/
- ppgtt_get_shadow_entry(spt, &se, index);
+ ppgtt_get_shadow_entry(spt, &old_se, index);
if (new_present) {
- ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+ ret = ppgtt_handle_guest_entry_add(spt, we, index);
if (ret)
goto fail;
}
- ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+ ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
if (ret)
goto fail;
if (!new_present) {
- ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
- ppgtt_set_shadow_entry(spt, &se, index);
+ ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
}
return 0;
@@ -1333,12 +1309,13 @@ fail:
return ret;
}
-static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
return enable_out_of_sync
- && gtt_type_is_pte_pt(
- guest_page_to_ppgtt_spt(gpt)->guest_page_type)
- && gpt->write_cnt >= 2;
+ && gtt_type_is_pte_pt(spt->guest_page.type)
+ && spt->guest_page.write_cnt >= 2;
}
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
@@ -1378,8 +1355,8 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
GTT_ENTRY_NUM_IN_ONE_PAGE) {
ppgtt_get_guest_entry(spt, &ge, index);
- ret = ppgtt_handle_guest_write_page_table(
- &spt->guest_page, &ge, index);
+ ret = ppgtt_handle_guest_write_page_table(spt,
+ &ge, index);
if (ret)
return ret;
clear_bit(index, spt->post_shadow_bitmap);
@@ -1390,10 +1367,9 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
}
static int ppgtt_handle_guest_write_page_table_bytes(
- struct intel_vgpu_guest_page *gpt,
+ struct intel_vgpu_ppgtt_spt *spt,
u64 pa, void *p_data, int bytes)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
@@ -1408,7 +1384,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
ops->test_pse(&we);
if (bytes == info->gtt_entry_size) {
- ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+ ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
if (ret)
return ret;
} else {
@@ -1416,7 +1392,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
int type = spt->shadow_page.type;
ppgtt_get_shadow_entry(spt, &se, index);
- ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+ ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
if (ret)
return ret;
ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
@@ -1428,128 +1404,54 @@ static int ppgtt_handle_guest_write_page_table_bytes(
if (!enable_out_of_sync)
return 0;
- gpt->write_cnt++;
+ spt->guest_page.write_cnt++;
- if (gpt->oos_page)
- ops->set_entry(gpt->oos_page->mem, &we, index,
+ if (spt->guest_page.oos_page)
+ ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
false, 0, vgpu);
- if (can_do_out_of_sync(gpt)) {
- if (!gpt->oos_page)
- ppgtt_allocate_oos_page(vgpu, gpt);
+ if (can_do_out_of_sync(spt)) {
+ if (!spt->guest_page.oos_page)
+ ppgtt_allocate_oos_page(spt);
- ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+ ret = ppgtt_set_guest_page_oos(spt);
if (ret < 0)
return ret;
}
return 0;
}
-/*
- * mm page table allocation policy for bdw+
- * - for ggtt, only virtual page table will be allocated.
- * - for ppgtt, dedicated virtual/shadow page table will be allocated.
- */
-static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
-{
- struct intel_vgpu *vgpu = mm->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
- void *mem;
-
- if (mm->type == INTEL_GVT_MM_PPGTT) {
- mm->page_table_entry_cnt = 4;
- mm->page_table_entry_size = mm->page_table_entry_cnt *
- info->gtt_entry_size;
- mem = kzalloc(mm->has_shadow_page_table ?
- mm->page_table_entry_size * 2
- : mm->page_table_entry_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
- mm->virtual_page_table = mem;
- if (!mm->has_shadow_page_table)
- return 0;
- mm->shadow_page_table = mem + mm->page_table_entry_size;
- } else if (mm->type == INTEL_GVT_MM_GGTT) {
- mm->page_table_entry_cnt =
- (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
- mm->page_table_entry_size = mm->page_table_entry_cnt *
- info->gtt_entry_size;
- mem = vzalloc(mm->page_table_entry_size);
- if (!mem)
- return -ENOMEM;
- mm->virtual_page_table = mem;
- }
- return 0;
-}
-
-static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
-{
- if (mm->type == INTEL_GVT_MM_PPGTT) {
- kfree(mm->virtual_page_table);
- } else if (mm->type == INTEL_GVT_MM_GGTT) {
- if (mm->virtual_page_table)
- vfree(mm->virtual_page_table);
- }
- mm->virtual_page_table = mm->shadow_page_table = NULL;
-}
-
-static void invalidate_mm(struct intel_vgpu_mm *mm)
+static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
struct intel_gvt_gtt_entry se;
- int i;
+ int index;
- if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+ if (!mm->ppgtt_mm.shadowed)
return;
- for (i = 0; i < mm->page_table_entry_cnt; i++) {
- ppgtt_get_shadow_root_entry(mm, &se, i);
+ for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
+ ppgtt_get_shadow_root_entry(mm, &se, index);
+
if (!ops->test_present(&se))
continue;
- ppgtt_invalidate_shadow_page_by_shadow_entry(
- vgpu, &se);
+
+ ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
se.val64 = 0;
- ppgtt_set_shadow_root_entry(mm, &se, i);
+ ppgtt_set_shadow_root_entry(mm, &se, index);
- trace_gpt_change(vgpu->id, "destroy root pointer",
- NULL, se.type, se.val64, i);
+ trace_spt_guest_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, index);
}
- mm->shadowed = false;
-}
-/**
- * intel_vgpu_destroy_mm - destroy a mm object
- * @mm: a kref object
- *
- * This function is used to destroy a mm object for vGPU
- *
- */
-void intel_vgpu_destroy_mm(struct kref *mm_ref)
-{
- struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
- struct intel_vgpu *vgpu = mm->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_gtt *gtt = &gvt->gtt;
-
- if (!mm->initialized)
- goto out;
-
- list_del(&mm->list);
- list_del(&mm->lru_list);
-
- if (mm->has_shadow_page_table)
- invalidate_mm(mm);
-
- gtt->mm_free_page_table(mm);
-out:
- kfree(mm);
+ mm->ppgtt_mm.shadowed = false;
}
-static int shadow_mm(struct intel_vgpu_mm *mm)
+
+static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
@@ -1557,119 +1459,155 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
struct intel_vgpu_ppgtt_spt *spt;
struct intel_gvt_gtt_entry ge, se;
- int i;
- int ret;
+ int index, ret;
- if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+ if (mm->ppgtt_mm.shadowed)
return 0;
- mm->shadowed = true;
+ mm->ppgtt_mm.shadowed = true;
+
+ for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
+ ppgtt_get_guest_root_entry(mm, &ge, index);
- for (i = 0; i < mm->page_table_entry_cnt; i++) {
- ppgtt_get_guest_root_entry(mm, &ge, i);
if (!ops->test_present(&ge))
continue;
- trace_gpt_change(vgpu->id, __func__, NULL,
- ge.type, ge.val64, i);
+ trace_spt_guest_change(vgpu->id, __func__, NULL,
+ ge.type, ge.val64, index);
- spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) {
gvt_vgpu_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt);
goto fail;
}
ppgtt_generate_shadow_entry(&se, spt, &ge);
- ppgtt_set_shadow_root_entry(mm, &se, i);
+ ppgtt_set_shadow_root_entry(mm, &se, index);
- trace_gpt_change(vgpu->id, "populate root pointer",
- NULL, se.type, se.val64, i);
+ trace_spt_guest_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, index);
}
+
return 0;
fail:
- invalidate_mm(mm);
+ invalidate_ppgtt_mm(mm);
return ret;
}
+static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return NULL;
+
+ mm->vgpu = vgpu;
+ kref_init(&mm->ref);
+ atomic_set(&mm->pincount, 0);
+
+ return mm;
+}
+
+static void vgpu_free_mm(struct intel_vgpu_mm *mm)
+{
+ kfree(mm);
+}
+
/**
- * intel_vgpu_create_mm - create a mm object for a vGPU
+ * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
* @vgpu: a vGPU
- * @mm_type: mm object type, should be PPGTT or GGTT
- * @virtual_page_table: page table root pointers. Could be NULL if user wants
- * to populate shadow later.
- * @page_table_level: describe the page table level of the mm object
- * @pde_base_index: pde root pointer base in GGTT MMIO.
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps.
*
- * This function is used to create a mm object for a vGPU.
+ * This function is used to create a ppgtt mm object for a vGPU.
*
* Returns:
* pointer to the mm object on success, ERR_PTR on failure.
*/
-struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
- int mm_type, void *virtual_page_table, int page_table_level,
- u32 pde_base_index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_vgpu_mm *mm;
int ret;
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- ret = -ENOMEM;
- goto fail;
- }
+ mm = vgpu_alloc_mm(vgpu);
+ if (!mm)
+ return ERR_PTR(-ENOMEM);
- mm->type = mm_type;
+ mm->type = INTEL_GVT_MM_PPGTT;
- if (page_table_level == 1)
- mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
- else if (page_table_level == 3)
- mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
- else if (page_table_level == 4)
- mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
- else {
- WARN_ON(1);
- ret = -EINVAL;
- goto fail;
- }
+ GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
+ root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
+ mm->ppgtt_mm.root_entry_type = root_entry_type;
- mm->page_table_level = page_table_level;
- mm->pde_base_index = pde_base_index;
+ INIT_LIST_HEAD(&mm->ppgtt_mm.list);
+ INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
- mm->vgpu = vgpu;
- mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
-
- kref_init(&mm->ref);
- atomic_set(&mm->pincount, 0);
- INIT_LIST_HEAD(&mm->list);
- INIT_LIST_HEAD(&mm->lru_list);
- list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+ if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+ mm->ppgtt_mm.guest_pdps[0] = pdps[0];
+ else
+ memcpy(mm->ppgtt_mm.guest_pdps, pdps,
+ sizeof(mm->ppgtt_mm.guest_pdps));
- ret = gtt->mm_alloc_page_table(mm);
+ ret = shadow_ppgtt_mm(mm);
if (ret) {
- gvt_vgpu_err("fail to allocate page table for mm\n");
- goto fail;
+ gvt_vgpu_err("failed to shadow ppgtt mm\n");
+ vgpu_free_mm(mm);
+ return ERR_PTR(ret);
}
- mm->initialized = true;
+ list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+ list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+ return mm;
+}
- if (virtual_page_table)
- memcpy(mm->virtual_page_table, virtual_page_table,
- mm->page_table_entry_size);
+static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_mm *mm;
+ unsigned long nr_entries;
- if (mm->has_shadow_page_table) {
- ret = shadow_mm(mm);
- if (ret)
- goto fail;
- list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+ mm = vgpu_alloc_mm(vgpu);
+ if (!mm)
+ return ERR_PTR(-ENOMEM);
+
+ mm->type = INTEL_GVT_MM_GGTT;
+
+ nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
+ mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
+ vgpu->gvt->device_info.gtt_entry_size);
+ if (!mm->ggtt_mm.virtual_ggtt) {
+ vgpu_free_mm(mm);
+ return ERR_PTR(-ENOMEM);
}
+
return mm;
-fail:
- gvt_vgpu_err("fail to create mm\n");
- if (mm)
- intel_gvt_mm_unreference(mm);
- return ERR_PTR(ret);
+}
+
+/**
+ * _intel_vgpu_mm_release - destroy a mm object
+ * @mm_ref: a kref object
+ *
+ * This function is used to destroy a mm object for vGPU
+ *
+ */
+void _intel_vgpu_mm_release(struct kref *mm_ref)
+{
+ struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+
+ if (GEM_WARN_ON(atomic_read(&mm->pincount)))
+ gvt_err("vgpu mm pin count bug detected\n");
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ list_del(&mm->ppgtt_mm.list);
+ list_del(&mm->ppgtt_mm.lru_list);
+ invalidate_ppgtt_mm(mm);
+ } else {
+ vfree(mm->ggtt_mm.virtual_ggtt);
+ }
+
+ vgpu_free_mm(mm);
}
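
With the table-allocation callbacks gone, an mm object's lifetime is purely kref based: intel_vgpu_create_ppgtt_mm() returns it with one reference, intel_vgpu_mm_get()/intel_vgpu_mm_put() adjust the count, and the final put lands in _intel_vgpu_mm_release() above. A minimal sketch of that contract follows; the wrapper function and its error handling are illustrative only, not part of this patch.

static int example_ppgtt_mm_lifetime(struct intel_vgpu *vgpu, u64 pdps[])
{
        struct intel_vgpu_mm *mm;

        mm = intel_vgpu_create_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
                                        pdps);
        if (IS_ERR(mm))
                return PTR_ERR(mm);     /* creation failed, no reference held */

        intel_vgpu_mm_get(mm);          /* a second user takes its own reference */
        intel_vgpu_mm_put(mm);          /* ...and drops it again */

        intel_vgpu_mm_put(mm);          /* last put ends up in _intel_vgpu_mm_release() */
        return 0;
}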
/**
@@ -1680,9 +1618,6 @@ fail:
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
- if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
- return;
-
atomic_dec(&mm->pincount);
}
@@ -1701,36 +1636,34 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
int ret;
- if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
- return 0;
+ atomic_inc(&mm->pincount);
- if (!mm->shadowed) {
- ret = shadow_mm(mm);
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ ret = shadow_ppgtt_mm(mm);
if (ret)
return ret;
+
+ list_move_tail(&mm->ppgtt_mm.lru_list,
+ &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
+
}
- atomic_inc(&mm->pincount);
- list_del_init(&mm->lru_list);
- list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
}
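
intel_vgpu_pin_mm() now re-shadows a PPGTT mm on demand and moves it to the tail of the LRU so it will not be picked by reclaim_one_ppgtt_mm(). A caller is expected to pair it with intel_vgpu_unpin_mm(); the sketch below assumes a hypothetical submission-side user and is not taken from this series.

static int example_use_shadowed_ppgtt(struct intel_vgpu_mm *mm)
{
        int ret;

        ret = intel_vgpu_pin_mm(mm);    /* re-shadows on demand, pins against reclaim */
        if (ret)
                return ret;

        /* ... submit work that references mm->ppgtt_mm.shadow_pdps here ... */

        intel_vgpu_unpin_mm(mm);        /* eligible for reclaim_one_ppgtt_mm() again */
        return 0;
}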
-static int reclaim_one_mm(struct intel_gvt *gvt)
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
struct intel_vgpu_mm *mm;
struct list_head *pos, *n;
- list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+ list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
- if (mm->type != INTEL_GVT_MM_PPGTT)
- continue;
if (atomic_read(&mm->pincount))
continue;
- list_del_init(&mm->lru_list);
- invalidate_mm(mm);
+ list_del_init(&mm->ppgtt_mm.lru_list);
+ invalidate_ppgtt_mm(mm);
return 1;
}
return 0;
@@ -1746,10 +1679,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- if (WARN_ON(!mm->has_shadow_page_table))
- return -EINVAL;
-
- s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
if (!s)
return -ENXIO;
@@ -1780,85 +1710,72 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
unsigned long gpa = INTEL_GVT_INVALID_ADDR;
unsigned long gma_index[4];
struct intel_gvt_gtt_entry e;
- int i, index;
+ int i, levels = 0;
int ret;
- if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
- return INTEL_GVT_INVALID_ADDR;
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
+ mm->type != INTEL_GVT_MM_PPGTT);
if (mm->type == INTEL_GVT_MM_GGTT) {
if (!vgpu_gmadr_is_valid(vgpu, gma))
goto err;
- ret = ggtt_get_guest_entry(mm, &e,
- gma_ops->gma_to_ggtt_pte_index(gma));
- if (ret)
- goto err;
+ ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+
gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
- return gpa;
- }
-
- switch (mm->page_table_level) {
- case 4:
- ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
- if (ret)
- goto err;
- gma_index[0] = gma_ops->gma_to_pml4_index(gma);
- gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
- gma_index[2] = gma_ops->gma_to_pde_index(gma);
- gma_index[3] = gma_ops->gma_to_pte_index(gma);
- index = 4;
- break;
- case 3:
- ret = ppgtt_get_shadow_root_entry(mm, &e,
- gma_ops->gma_to_l3_pdp_index(gma));
- if (ret)
- goto err;
- gma_index[0] = gma_ops->gma_to_pde_index(gma);
- gma_index[1] = gma_ops->gma_to_pte_index(gma);
- index = 2;
- break;
- case 2:
- ret = ppgtt_get_shadow_root_entry(mm, &e,
- gma_ops->gma_to_pde_index(gma));
- if (ret)
- goto err;
- gma_index[0] = gma_ops->gma_to_pte_index(gma);
- index = 1;
- break;
- default:
- WARN_ON(1);
- goto err;
- }
+ } else {
+ switch (mm->ppgtt_mm.root_entry_type) {
+ case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+ ppgtt_get_shadow_root_entry(mm, &e, 0);
+
+ gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+ gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+ gma_index[2] = gma_ops->gma_to_pde_index(gma);
+ gma_index[3] = gma_ops->gma_to_pte_index(gma);
+ levels = 4;
+ break;
+ case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_l3_pdp_index(gma));
+
+ gma_index[0] = gma_ops->gma_to_pde_index(gma);
+ gma_index[1] = gma_ops->gma_to_pte_index(gma);
+ levels = 2;
+ break;
+ default:
+ GEM_BUG_ON(1);
+ }
- /* walk into the shadow page table and get gpa from guest entry */
- for (i = 0; i < index; i++) {
- ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
- (i == index - 1));
- if (ret)
- goto err;
+ /* walk the shadow page table and get gpa from guest entry */
+ for (i = 0; i < levels; i++) {
+ ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+ (i == levels - 1));
+ if (ret)
+ goto err;
- if (!pte_ops->test_present(&e)) {
- gvt_dbg_core("GMA 0x%lx is not present\n", gma);
- goto err;
+ if (!pte_ops->test_present(&e)) {
+ gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+ goto err;
+ }
}
- }
- gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
- + (gma & ~I915_GTT_PAGE_MASK);
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
+ (gma & ~I915_GTT_PAGE_MASK);
+ trace_gma_translate(vgpu->id, "ppgtt", 0,
+ mm->ppgtt_mm.root_entry_type, gma, gpa);
+ }
- trace_gma_translate(vgpu->id, "ppgtt", 0,
- mm->page_table_level, gma, gpa);
return gpa;
err:
gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR;
}
-static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes)
{
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
@@ -1887,7 +1804,7 @@ static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
* Returns:
* Zero on success, error code if failed.
*/
-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
@@ -1897,11 +1814,11 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
return -EINVAL;
off -= info->gtt_start_offset;
- ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+ ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
return ret;
}
-static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -1911,6 +1828,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
unsigned long gma, gfn;
struct intel_gvt_gtt_entry e, m;
+ dma_addr_t dma_addr;
int ret;
if (bytes != 4 && bytes != 8)
@@ -1926,6 +1844,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ m = e;
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
@@ -1938,29 +1857,29 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
goto out;
}
- ret = gtt_entry_p2m(vgpu, &e, &m);
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
+ &dma_addr);
if (ret) {
- gvt_vgpu_err("fail to translate guest gtt entry\n");
+ gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* The guest driver may read/write the entry while it is only
* partially updated; translation can fail in that case, so set
* the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- }
- } else {
- m = e;
+ } else
+ ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
+ } else
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- }
out:
- ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
- gtt_invalidate(gvt->dev_priv);
+ ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_invalidate(gvt->dev_priv);
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
/*
- * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
* @vgpu: a vGPU
* @off: register offset
* @p_data: data from guest write
@@ -1971,8 +1890,8 @@ out:
* Returns:
* Zero on success, error code if failed.
*/
-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
- void *p_data, unsigned int bytes)
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
@@ -1981,43 +1900,10 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return -EINVAL;
off -= info->gtt_start_offset;
- ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+ ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
return ret;
}
-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
- void *p_data, unsigned int bytes)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- int ret = 0;
-
- if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
- struct intel_vgpu_page_track *t;
-
- mutex_lock(&gvt->lock);
-
- t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
- if (t) {
- if (unlikely(vgpu->failsafe)) {
- /* remove write protection to prevent furture traps */
- intel_vgpu_clean_page_track(vgpu, t);
- } else {
- ret = t->handler(t, pa, p_data, bytes);
- if (ret) {
- gvt_err("guest page write error %d, "
- "gfn 0x%lx, pa 0x%llx, "
- "var 0x%x, len %d\n",
- ret, t->gfn, pa,
- *(u32 *)p_data, bytes);
- }
- }
- }
- mutex_unlock(&gvt->lock);
- }
- return ret;
-}
-
-
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
intel_gvt_gtt_type_t type)
{
@@ -2131,45 +2017,49 @@ err:
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
- struct intel_vgpu_mm *ggtt_mm;
- hash_init(gtt->tracked_guest_page_hash_table);
- hash_init(gtt->shadow_page_hash_table);
+ INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
- INIT_LIST_HEAD(&gtt->mm_list_head);
+ INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
- intel_vgpu_reset_ggtt(vgpu);
-
- ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
- NULL, 1, 0);
- if (IS_ERR(ggtt_mm)) {
+ gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
+ if (IS_ERR(gtt->ggtt_mm)) {
gvt_vgpu_err("fail to create mm for ggtt.\n");
- return PTR_ERR(ggtt_mm);
+ return PTR_ERR(gtt->ggtt_mm);
}
- gtt->ggtt_mm = ggtt_mm;
+ intel_vgpu_reset_ggtt(vgpu);
return create_scratch_page_tree(vgpu);
}
-static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_mm *mm;
- list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- if (mm->type == type) {
- vgpu->gvt->gtt.mm_free_page_table(mm);
- list_del(&mm->list);
- list_del(&mm->lru_list);
- kfree(mm);
- }
+ list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+ intel_vgpu_destroy_mm(mm);
+ }
+
+ if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
+ gvt_err("vgpu ppgtt mm is not fully destoried\n");
+
+ if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
+ gvt_err("Why we still has spt not freed?\n");
+ ppgtt_free_all_spt(vgpu);
}
}
+static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
+{
+ intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
+ vgpu->gtt.ggtt_mm = NULL;
+}
+
/**
* intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization
* @vgpu: a vGPU
@@ -2182,11 +2072,9 @@ static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
*/
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
- ppgtt_free_all_shadow_page(vgpu);
+ intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+ intel_vgpu_destroy_ggtt_mm(vgpu);
release_scratch_page_tree(vgpu);
-
- intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
- intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2248,99 +2136,78 @@ fail:
* pointer to mm object on success, NULL if failed.
*/
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry)
+ u64 pdps[])
{
- struct list_head *pos;
struct intel_vgpu_mm *mm;
- u64 *src, *dst;
-
- list_for_each(pos, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- if (mm->type != INTEL_GVT_MM_PPGTT)
- continue;
-
- if (mm->page_table_level != page_table_level)
- continue;
+ struct list_head *pos;
- src = root_entry;
- dst = mm->virtual_page_table;
+ list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
- if (page_table_level == 3) {
- if (src[0] == dst[0]
- && src[1] == dst[1]
- && src[2] == dst[2]
- && src[3] == dst[3])
+ switch (mm->ppgtt_mm.root_entry_type) {
+ case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+ if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
return mm;
- } else {
- if (src[0] == dst[0])
+ break;
+ case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+ if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
+ sizeof(mm->ppgtt_mm.guest_pdps)))
return mm;
+ break;
+ default:
+ GEM_BUG_ON(1);
}
}
return NULL;
}
/**
- * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
- * g2v notification
+ * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
* @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps
*
- * This function is used to create a PPGTT mm object from a guest to GVT-g
- * notification.
+ * This function is used to find or create a PPGTT mm object from a guest.
*
* Returns:
* pointer to the mm object on success, ERR_PTR if failed.
*/
-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level)
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
- u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
- if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
- return -EINVAL;
-
- mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
if (mm) {
- intel_gvt_mm_reference(mm);
+ intel_vgpu_mm_get(mm);
} else {
- mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
- pdp, page_table_level, 0);
- if (IS_ERR(mm)) {
+ mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
+ if (IS_ERR(mm))
gvt_vgpu_err("fail to create mm\n");
- return PTR_ERR(mm);
- }
}
- return 0;
+ return mm;
}
/**
- * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
- * g2v notification
+ * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
* @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @pdps: guest pdps
*
- * This function is used to create a PPGTT mm object from a guest to GVT-g
- * notification.
+ * This function is used to find a PPGTT mm object from a guest and put
+ * its reference (the object is destroyed on the last put).
*
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level)
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
- u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
- if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
- return -EINVAL;
-
- mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
if (!mm) {
gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
- intel_gvt_mm_unreference(mm);
+ intel_vgpu_mm_put(mm);
return 0;
}
@@ -2367,8 +2234,6 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
|| IS_KABYLAKE(gvt->dev_priv)) {
gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
- gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
- gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
} else {
return -ENODEV;
}
@@ -2399,7 +2264,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return ret;
}
}
- INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+ INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
return 0;
}
@@ -2437,28 +2302,25 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
u32 index;
- u32 offset;
u32 num_entries;
- struct intel_gvt_gtt_entry e;
- memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
- e.type = GTT_TYPE_GGTT_PTE;
- ops->set_pfn(&e, gvt->gtt.scratch_mfn);
- e.val64 |= _PAGE_PRESENT;
+ pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
+ pte_ops->set_present(&entry);
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
- for (offset = 0; offset < num_entries; offset++)
- ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+ while (num_entries--)
+ ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
- for (offset = 0; offset < num_entries; offset++)
- ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+ while (num_entries--)
+ ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
- gtt_invalidate(dev_priv);
+ ggtt_invalidate(dev_priv);
}
/**
@@ -2471,13 +2333,10 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
- ppgtt_free_all_shadow_page(vgpu);
-
/* Shadow pages are only created when there is no page
* table tracking data, so remove page tracking data after
* removing the shadow pages.
*/
- intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
-
+ intel_vgpu_destroy_all_ppgtt_mm(vgpu);
intel_vgpu_reset_ggtt(vgpu);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 4cc13b5934f1..e831507e17c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -39,7 +39,6 @@
struct intel_vgpu_mm;
-#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
@@ -84,17 +83,12 @@ struct intel_gvt_gtt {
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
- struct list_head mm_lru_list_head;
+ struct list_head ppgtt_mm_lru_list_head;
struct page *scratch_page;
unsigned long scratch_mfn;
};
-enum {
- INTEL_GVT_MM_GGTT = 0,
- INTEL_GVT_MM_PPGTT,
-};
-
typedef enum {
GTT_TYPE_INVALID = -1,
@@ -125,66 +119,60 @@ typedef enum {
GTT_TYPE_MAX,
} intel_gvt_gtt_type_t;
-struct intel_vgpu_mm {
- int type;
- bool initialized;
- bool shadowed;
+enum intel_gvt_mm_type {
+ INTEL_GVT_MM_GGTT,
+ INTEL_GVT_MM_PPGTT,
+};
- int page_table_entry_type;
- u32 page_table_entry_size;
- u32 page_table_entry_cnt;
- void *virtual_page_table;
- void *shadow_page_table;
+#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
- int page_table_level;
- bool has_shadow_page_table;
- u32 pde_base_index;
+struct intel_vgpu_mm {
+ enum intel_gvt_mm_type type;
+ struct intel_vgpu *vgpu;
- struct list_head list;
struct kref ref;
atomic_t pincount;
- struct list_head lru_list;
- struct intel_vgpu *vgpu;
-};
-
-extern int intel_vgpu_mm_get_entry(
- struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index);
-extern int intel_vgpu_mm_set_entry(
- struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index);
-
-#define ggtt_get_guest_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_set_guest_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_get_shadow_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
-
-#define ggtt_set_shadow_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+ union {
+ struct {
+ intel_gvt_gtt_type_t root_entry_type;
+ /*
+ * The 4 PDPs in the ring context. For 48-bit addressing,
+ * only PDP0 is valid and points to the PML4. For 32-bit
+ * addressing, all 4 are used as true PDPs.
+ */
+ u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
+ u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
+ bool shadowed;
+
+ struct list_head list;
+ struct list_head lru_list;
+ } ppgtt_mm;
+ struct {
+ void *virtual_ggtt;
+ } ggtt_mm;
+ };
+};
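
Since the PPGTT and GGTT specific fields now share an anonymous union, every access has to be guarded by mm->type. A small accessor in the spirit of the surrounding code could make that explicit; this helper is illustrative only and does not exist in the patch.

static inline u64 *vgpu_mm_guest_pdps(struct intel_vgpu_mm *mm)
{
        /* the union member is only valid for a PPGTT mm */
        GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
        return mm->ppgtt_mm.guest_pdps;
}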
-#define ppgtt_get_guest_root_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
-#define ppgtt_set_guest_root_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
+{
+ kref_get(&mm->ref);
+}
-#define ppgtt_get_shadow_root_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+void _intel_vgpu_mm_release(struct kref *mm_ref);
-#define ppgtt_set_shadow_root_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
+{
+ kref_put(&mm->ref, _intel_vgpu_mm_release);
+}
-extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
- int mm_type, void *virtual_page_table, int page_table_level,
- u32 pde_base_index);
-extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
+{
+ intel_vgpu_mm_put(mm);
+}
struct intel_vgpu_guest_page;
@@ -196,10 +184,8 @@ struct intel_vgpu_scratch_pt {
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
- struct list_head mm_list_head;
- DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
- DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
- atomic_t n_tracked_guest_page;
+ struct list_head ppgtt_mm_list_head;
+ struct radix_tree_root spt_tree;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
@@ -216,32 +202,8 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
-struct intel_vgpu_oos_page;
-
-struct intel_vgpu_shadow_page {
- void *vaddr;
- struct page *page;
- int type;
- struct hlist_node node;
- unsigned long mfn;
-};
-
-struct intel_vgpu_page_track {
- struct hlist_node node;
- bool tracked;
- unsigned long gfn;
- int (*handler)(void *, u64, void *, int);
- void *data;
-};
-
-struct intel_vgpu_guest_page {
- struct intel_vgpu_page_track track;
- unsigned long write_cnt;
- struct intel_vgpu_oos_page *oos_page;
-};
-
struct intel_vgpu_oos_page {
- struct intel_vgpu_guest_page *guest_page;
+ struct intel_vgpu_ppgtt_spt *spt;
struct list_head list;
struct list_head vm_list;
int id;
@@ -250,42 +212,33 @@ struct intel_vgpu_oos_page {
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+/* Represents a vGPU shadow page table. */
struct intel_vgpu_ppgtt_spt {
- struct intel_vgpu_shadow_page shadow_page;
- struct intel_vgpu_guest_page guest_page;
- int guest_page_type;
atomic_t refcount;
struct intel_vgpu *vgpu;
- DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
- struct list_head post_shadow_list;
-};
-int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
- struct intel_vgpu_page_track *t,
- unsigned long gfn,
- int (*handler)(void *gp, u64, void *, int),
- void *data);
+ struct {
+ intel_gvt_gtt_type_t type;
+ void *vaddr;
+ struct page *page;
+ unsigned long mfn;
+ } shadow_page;
-void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
- struct intel_vgpu_page_track *t);
+ struct {
+ intel_gvt_gtt_type_t type;
+ unsigned long gfn;
+ unsigned long write_cnt;
+ struct intel_vgpu_oos_page *oos_page;
+ } guest_page;
-struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
- struct intel_vgpu *vgpu, unsigned long gfn);
+ DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+ struct list_head post_shadow_list;
+};
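
The removed hash tables are replaced by the per-vGPU spt_tree radix tree, presumably keyed by the shadow page mfn as the intel_vgpu_find_spt_by_mfn() call above suggests. A sketch of the assumed insert/lookup pattern; the wrapper function name is hypothetical.

static struct intel_vgpu_ppgtt_spt *
example_spt_tree_usage(struct intel_vgpu *vgpu,
                       struct intel_vgpu_ppgtt_spt *spt)
{
        /* register a freshly allocated shadow page, keyed by its mfn */
        if (radix_tree_insert(&vgpu->gtt.spt_tree,
                              spt->shadow_page.mfn, spt))
                return NULL;

        /* later: resolve a shadow entry's pfn back to its spt */
        return radix_tree_lookup(&vgpu->gtt.spt_tree, spt->shadow_page.mfn);
}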
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
-static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
-{
- kref_get(&mm->ref);
-}
-
-static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
-{
- kref_put(&mm->ref, intel_vgpu_destroy_mm);
-}
-
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
@@ -294,21 +247,17 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
unsigned long gma);
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry);
+ u64 pdps[]);
-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level);
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level);
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
- void *p_data, unsigned int bytes);
-
#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index fac54f32d33f..61bd14fcb649 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -183,7 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
- .write_protect_handler = intel_vgpu_write_protect_handler,
+ .write_protect_handler = intel_vgpu_page_track_handler,
};
/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index c6197d990818..efacd8abbedc 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -48,6 +48,7 @@
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
+#include "page_track.h"
#define GVT_MAX_VGPU 8
@@ -131,11 +132,9 @@ struct intel_vgpu_opregion {
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
-#define INTEL_GVT_MAX_PORT 5
-
struct intel_vgpu_display {
struct intel_vgpu_i2c_edid i2c_edid;
- struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+ struct intel_vgpu_port ports[I915_MAX_PORTS];
struct intel_vgpu_sbi sbi;
};
@@ -190,6 +189,7 @@ struct intel_vgpu {
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_submission submission;
+ struct radix_tree_root page_track_tree;
u32 hws_pga[I915_NUM_ENGINES];
struct dentry *debugfs;
@@ -201,8 +201,16 @@ struct intel_vgpu {
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
- struct rb_root cache;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (e.g.
+ * scratch pages). This helps to reduce DMA setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
struct mutex cache_lock;
+
struct notifier_block iommu_notifier;
struct notifier_block group_notifier;
struct kvm *kvm;
@@ -308,7 +316,10 @@ struct intel_gvt {
wait_queue_head_t service_thread_wq;
unsigned long service_request;
- struct engine_mmio *engine_mmio_list;
+ struct {
+ struct engine_mmio *mmio;
+ int ctx_mmio_count[I915_NUM_ENGINES];
+ } engine_mmio_list;
struct dentry *debugfs_root;
};
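
The new ctx_mmio_count[] is consumed by restore_context_mmio_for_inhibit() in mmio_context.c below; it is presumably filled once at init time by counting the in_context entries of the selected engine mmio list, roughly as sketched here (an assumption about the init path, not a verbatim excerpt from the patch).

static void example_count_ctx_mmio(struct intel_gvt *gvt)
{
        struct engine_mmio *mmio;

        for (mmio = gvt->engine_mmio_list.mmio;
             i915_mmio_reg_valid(mmio->reg); mmio++) {
                if (mmio->in_context)
                        gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
        }
}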
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 9be639aa3b55..112f2ec7c25f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -188,7 +188,9 @@ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
- if (fence_num >= vgpu_fence_sz(vgpu)) {
+ unsigned int max_fence = vgpu_fence_sz(vgpu);
+
+ if (fence_num >= max_fence) {
/* When the guest accesses oob fence regs without accessing
* pv_info first, we treat the guest as not supporting GVT,
@@ -201,7 +203,7 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
if (!vgpu->mmio.disable_warn_untrack) {
gvt_vgpu_err("found oob fence register access\n");
gvt_vgpu_err("total fence %d, access fence %d\n",
- vgpu_fence_sz(vgpu), fence_num);
+ max_fence, fence_num);
}
memset(p_data, 0, bytes);
return -EINVAL;
@@ -320,7 +322,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
- vgpu_vreg(vgpu, offset) = 0;
+ vgpu_vreg(vgpu, offset) = 0;
return 0;
}
@@ -1139,21 +1141,21 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- int ret = 0;
+ intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ struct intel_vgpu_mm *mm;
+ u64 *pdps;
+
+ pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
- ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
- break;
- case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
- ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
- break;
+ root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
- ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
- break;
+ mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
+ return PTR_ERR_OR_ZERO(mm);
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
- ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
- break;
+ return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
case VGT_G2V_EXECLIST_CONTEXT_CREATE:
case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
case 1: /* Remove this in guest driver. */
@@ -1161,7 +1163,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
default:
gvt_vgpu_err("Invalid PV notification %d\n", notification);
}
- return ret;
+ return 0;
}
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
@@ -1389,8 +1391,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
- gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
- vgpu->id, offset, value);
+ gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
+ offset, value);
return -EINVAL;
}
/*
@@ -1399,8 +1401,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
* support BDW, SKL or other platforms with same HWSP registers.
*/
if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
- gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
- vgpu->id, offset);
+ gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
+ offset);
return -EINVAL;
}
vgpu->hws_pga[ring_id] = value;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index f8e77e166246..f6dd9f717888 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -44,13 +44,18 @@ struct intel_gvt_mpt {
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
- int (*set_wp_page)(unsigned long handle, u64 gfn);
- int (*unset_wp_page)(unsigned long handle, u64 gfn);
+ int (*enable_page_track)(unsigned long handle, u64 gfn);
+ int (*disable_page_track)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+
+ int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
+ dma_addr_t *dma_addr);
+ void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 909499b73d03..8a428678e4b5 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -41,6 +41,7 @@
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <linux/debugfs.h>
#include "i915_drv.h"
#include "gvt.h"
@@ -84,12 +85,16 @@ struct kvmgt_guest_info {
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
+ struct dentry *debugfs_cache_entries;
};
struct gvt_dma {
- struct rb_node node;
+ struct intel_vgpu *vgpu;
+ struct rb_node gfn_node;
+ struct rb_node dma_addr_node;
gfn_t gfn;
- unsigned long iova;
+ dma_addr_t dma_addr;
+ struct kref ref;
};
static inline bool handle_valid(unsigned long handle)
@@ -101,165 +106,167 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
-static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
- unsigned long *iova)
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr)
{
- struct page *page;
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr;
+ struct page *page;
+ unsigned long pfn;
+ int ret;
- if (unlikely(!pfn_valid(pfn)))
- return -EFAULT;
+ /* Pin the page first. */
+ ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (ret != 1) {
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+ gfn, ret);
+ return -EINVAL;
+ }
+ /* Setup DMA mapping. */
page = pfn_to_page(pfn);
- daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, daddr))
+ *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr)) {
+ gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
+ vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
return -ENOMEM;
+ }
- *iova = (unsigned long)(daddr >> PAGE_SHIFT);
return 0;
}
-static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t dma_addr)
{
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr;
+ int ret;
- daddr = (dma_addr_t)(iova << PAGE_SHIFT);
- dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+ WARN_ON(ret != 1);
}
-static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
{
- struct rb_node *node = vgpu->vdev.cache.rb_node;
- struct gvt_dma *ret = NULL;
+ struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct gvt_dma *itr;
while (node) {
- struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+ itr = rb_entry(node, struct gvt_dma, dma_addr_node);
- if (gfn < itr->gfn)
+ if (dma_addr < itr->dma_addr)
node = node->rb_left;
- else if (gfn > itr->gfn)
+ else if (dma_addr > itr->dma_addr)
node = node->rb_right;
- else {
- ret = itr;
- goto out;
- }
+ else
+ return itr;
}
-
-out:
- return ret;
+ return NULL;
}
-static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct gvt_dma *entry;
- unsigned long iova;
-
- mutex_lock(&vgpu->vdev.cache_lock);
+ struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct gvt_dma *itr;
- entry = __gvt_cache_find(vgpu, gfn);
- iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
+ while (node) {
+ itr = rb_entry(node, struct gvt_dma, gfn_node);
- mutex_unlock(&vgpu->vdev.cache_lock);
- return iova;
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else
+ return itr;
+ }
+ return NULL;
}
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
- unsigned long iova)
+static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+ dma_addr_t dma_addr)
{
struct gvt_dma *new, *itr;
- struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+ struct rb_node **link, *parent = NULL;
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
return;
+ new->vgpu = vgpu;
new->gfn = gfn;
- new->iova = iova;
+ new->dma_addr = dma_addr;
+ kref_init(&new->ref);
- mutex_lock(&vgpu->vdev.cache_lock);
+ /* gfn_cache maps gfn to struct gvt_dma. */
+ link = &vgpu->vdev.gfn_cache.rb_node;
while (*link) {
parent = *link;
- itr = rb_entry(parent, struct gvt_dma, node);
+ itr = rb_entry(parent, struct gvt_dma, gfn_node);
- if (gfn == itr->gfn)
- goto out;
- else if (gfn < itr->gfn)
+ if (gfn < itr->gfn)
link = &parent->rb_left;
else
link = &parent->rb_right;
}
+ rb_link_node(&new->gfn_node, parent, link);
+ rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &vgpu->vdev.cache);
- mutex_unlock(&vgpu->vdev.cache_lock);
- return;
+ /* dma_addr_cache maps dma addr to struct gvt_dma. */
+ parent = NULL;
+ link = &vgpu->vdev.dma_addr_cache.rb_node;
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
-out:
- mutex_unlock(&vgpu->vdev.cache_lock);
- kfree(new);
+ if (dma_addr < itr->dma_addr)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+ rb_link_node(&new->dma_addr_node, parent, link);
+ rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+
+ vgpu->vdev.nr_cache_entries++;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- rb_erase(&entry->node, &vgpu->vdev.cache);
+ rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
kfree(entry);
-}
-
-static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
- struct device *dev = mdev_dev(vgpu->vdev.mdev);
- struct gvt_dma *this;
- unsigned long g1;
- int rc;
-
- mutex_lock(&vgpu->vdev.cache_lock);
- this = __gvt_cache_find(vgpu, gfn);
- if (!this) {
- mutex_unlock(&vgpu->vdev.cache_lock);
- return;
- }
-
- g1 = gfn;
- gvt_dma_unmap_iova(vgpu, this->iova);
- rc = vfio_unpin_pages(dev, &g1, 1);
- WARN_ON(rc != 1);
- __gvt_cache_remove_entry(vgpu, this);
- mutex_unlock(&vgpu->vdev.cache_lock);
-}
-
-static void gvt_cache_init(struct intel_vgpu *vgpu)
-{
- vgpu->vdev.cache = RB_ROOT;
- mutex_init(&vgpu->vdev.cache_lock);
+ vgpu->vdev.nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct device *dev = mdev_dev(vgpu->vdev.mdev);
- unsigned long gfn;
for (;;) {
mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.cache);
+ node = rb_first(&vgpu->vdev.gfn_cache);
if (!node) {
mutex_unlock(&vgpu->vdev.cache_lock);
break;
}
- dma = rb_entry(node, struct gvt_dma, node);
- gvt_dma_unmap_iova(vgpu, dma->iova);
- gfn = dma->gfn;
+ dma = rb_entry(node, struct gvt_dma, gfn_node);
+ gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vgpu->vdev.cache_lock);
- vfio_unpin_pages(dev, &gfn, 1);
}
}
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+ vgpu->vdev.gfn_cache = RB_ROOT;
+ vgpu->vdev.dma_addr_cache = RB_ROOT;
+ vgpu->vdev.nr_cache_entries = 0;
+ mutex_init(&vgpu->vdev.cache_lock);
+}
+
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
hash_init(info->ptable);
@@ -452,7 +459,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
- gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
+ gvt_err("failed to create intel vgpu: %d\n", ret);
goto out;
}
@@ -489,13 +496,22 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
- unsigned long gfn, end_gfn;
+ struct gvt_dma *entry;
+ unsigned long iov_pfn, end_iov_pfn;
- gfn = unmap->iova >> PAGE_SHIFT;
- end_gfn = gfn + unmap->size / PAGE_SIZE;
+ iov_pfn = unmap->iova >> PAGE_SHIFT;
+ end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- while (gfn < end_gfn)
- gvt_cache_remove(vgpu, gfn++);
+ mutex_lock(&vgpu->vdev.cache_lock);
+ for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+ entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+ if (!entry)
+ continue;
+
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+ __gvt_cache_remove_entry(vgpu, entry);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
}
return NOTIFY_OK;
@@ -1321,7 +1337,7 @@ static void kvmgt_host_exit(struct device *dev, void *gvt)
mdev_unregister_device(dev);
}
-static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
@@ -1355,7 +1371,7 @@ out:
return 0;
}
-static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
@@ -1483,11 +1499,20 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
+ info->debugfs_cache_entries = debugfs_create_ulong(
+ "kvmgt_nr_cache_entries",
+ 0444, vgpu->debugfs,
+ &vgpu->vdev.nr_cache_entries);
+ if (!info->debugfs_cache_entries)
+ gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
+
return 0;
}
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
+ debugfs_remove(info->debugfs_cache_entries);
+
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
kvmgt_protect_table_destroy(info);
@@ -1527,39 +1552,77 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
- unsigned long iova, pfn;
struct kvmgt_guest_info *info;
- struct device *dev;
- struct intel_vgpu *vgpu;
- int rc;
+ kvm_pfn_t pfn;
if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
- iova = gvt_cache_find(info->vgpu, gfn);
- if (iova != INTEL_GVT_INVALID_ADDR)
- return iova;
-
- pfn = INTEL_GVT_INVALID_ADDR;
- dev = mdev_dev(info->vgpu->vdev.mdev);
- rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
- if (rc != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
- gfn, rc);
- return INTEL_GVT_INVALID_ADDR;
- }
- /* transfer to host iova for GFX to use DMA */
- rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
- if (rc) {
- gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
- vfio_unpin_pages(dev, &gfn, 1);
+
+ pfn = gfn_to_pfn(info->kvm, gfn);
+ if (is_error_noslot_pfn(pfn))
return INTEL_GVT_INVALID_ADDR;
+
+ return pfn;
+}
+
+int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+ dma_addr_t *dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct gvt_dma *entry;
+ int ret;
+
+ if (!handle_valid(handle))
+ return -EINVAL;
+
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+
+ entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ if (!entry) {
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+ if (ret) {
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+ return ret;
+ }
+ __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ } else {
+ kref_get(&entry->ref);
+ *dma_addr = entry->dma_addr;
}
- gvt_cache_add(info->vgpu, gfn, iova);
- return iova;
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+ return 0;
+}
+
+static void __gvt_dma_release(struct kref *ref)
+{
+ struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
+
+ gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+ __gvt_cache_remove_entry(entry->vgpu, entry);
+}
+
+void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct gvt_dma *entry;
+
+ if (!handle_valid(handle))
+ return;
+
+ info = (struct kvmgt_guest_info *)handle;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ if (entry)
+ kref_put(&entry->ref, __gvt_dma_release);
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
}
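
Each gvt_dma cache entry is kref counted, so mapping the same gfn repeatedly just returns the cached dma_addr and every map must be balanced by an unmap. A sketch of that contract, with placeholder handle/gfn values and a hypothetical wrapper name:

static int example_balanced_mapping(unsigned long handle, unsigned long gfn)
{
        dma_addr_t a, b;
        int ret;

        ret = kvmgt_dma_map_guest_page(handle, gfn, &a);
        if (ret)
                return ret;

        ret = kvmgt_dma_map_guest_page(handle, gfn, &b);        /* cache hit, a == b */
        if (ret) {
                kvmgt_dma_unmap_guest_page(handle, a);
                return ret;
        }

        kvmgt_dma_unmap_guest_page(handle, b);  /* drops one reference */
        kvmgt_dma_unmap_guest_page(handle, a);  /* last put unpins and unmaps the page */
        return 0;
}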
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -1629,11 +1692,13 @@ struct intel_gvt_mpt kvmgt_mpt = {
.detach_vgpu = kvmgt_detach_vgpu,
.inject_msi = kvmgt_inject_msi,
.from_virt_to_mfn = kvmgt_virt_to_pfn,
- .set_wp_page = kvmgt_write_protect_add,
- .unset_wp_page = kvmgt_write_protect_remove,
+ .enable_page_track = kvmgt_page_track_add,
+ .disable_page_track = kvmgt_page_track_remove,
.read_gpa = kvmgt_read_gpa,
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
+ .dma_map_guest_page = kvmgt_dma_map_guest_page,
+ .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
.set_opregion = kvmgt_set_opregion,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 5c869e3fdf3b..11b71b33f1c0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -76,10 +76,9 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
else
intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
- } else if (reg_is_gtt(gvt, offset) &&
- vgpu->gtt.ggtt_mm->virtual_page_table) {
+ } else if (reg_is_gtt(gvt, offset)) {
offset -= gvt->device_info.gtt_start_offset;
- pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+ pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
if (read)
memcpy(p_data, pt, bytes);
else
@@ -125,7 +124,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
- ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+ ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
@@ -198,7 +197,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
- ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+ ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 73ad6e90e49d..74a9c7b5516e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -50,6 +50,8 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+#define GEN9_MOCS_SIZE 64
+
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
@@ -151,8 +153,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
static struct {
bool initialized;
- u32 control_table[I915_NUM_ENGINES][64];
- u32 l3cc_table[32];
+ u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
+ u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;
static void load_render_mocs(struct drm_i915_private *dev_priv)
@@ -169,7 +171,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
+ for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
I915_READ_FW(offset);
offset.reg += 4;
@@ -177,7 +179,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
}
offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] =
I915_READ_FW(offset);
offset.reg += 4;
@@ -185,6 +187,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
gen9_render_mocs.initialized = true;
}
+static int
+restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ u32 *cs;
+ int ret;
+ struct engine_mmio *mmio;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = req->engine->id;
+ int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
+
+ if (count == 0)
+ return 0;
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ cs = intel_ring_begin(req, count * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(count);
+ for (mmio = gvt->engine_mmio_list.mmio;
+ i915_mmio_reg_valid(mmio->reg); mmio++) {
+ if (mmio->ring_id != ring_id ||
+ !mmio->in_context)
+ continue;
+
+ *cs++ = i915_mmio_reg_offset(mmio->reg);
+ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
+ (mmio->mask << 16);
+ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+ *(cs-2), *(cs-1), vgpu->id, ring_id);
+ }
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ unsigned int index;
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
+
+ for (index = 0; index < GEN9_MOCS_SIZE; index++) {
+ *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
+ *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
+ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+ *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+ }
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ return 0;
+}
+
+static int
+restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ unsigned int index;
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
+
+ for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
+ *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
+ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+ *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+ }
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ return 0;
+}
+
+/*
+ * Use LRI commands to initialize the MMIO registers that live in the context
+ * state image of an inhibit context: the tracked engine MMIO, render_mocs and
+ * render_mocs_l3cc.
+ */
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ int ret;
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ ret = restore_context_mmio_for_inhibit(vgpu, req);
+ if (ret)
+ goto out;
+
+ /* no MOCS register in context except render engine */
+ if (req->engine->id != RCS)
+ goto out;
+
+ ret = restore_render_mocs_control_for_inhibit(vgpu, req);
+ if (ret)
+ goto out;
+
+ ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
+ if (ret)
+ goto out;
+
+out:
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ return ret;
+}
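
Illustrative sketch (not part of the patch): the ring-space budgets passed to intel_ring_begin() in the helpers above follow the usual LRI layout of one header dword, two dwords per register pair, and a trailing MI_NOOP for padding, which is where the count * 2 + 2 figure comes from.

	/* Standalone sketch of the dword budget assumed above; not kernel code. */
	static unsigned int lri_batch_dwords(unsigned int count)
	{
		/* 1 x MI_LOAD_REGISTER_IMM header + 2 dwords per reg pair + 1 x MI_NOOP */
		return 1 + 2 * count + 1;
	}

	/* e.g. the MOCS control table: lri_batch_dwords(GEN9_MOCS_SIZE) == 2 * 64 + 2 */
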
+
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -251,11 +400,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
+ if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+ return;
+
if (!pre && !gen9_render_mocs.initialized)
load_render_mocs(dev_priv);
offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
+ for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, offset);
else
@@ -273,7 +425,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, l3_offset);
else
@@ -293,6 +445,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+{
+ u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+ u32 inhibit_mask =
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+
+ return inhibit_mask ==
+ (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
+}
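
For readers unfamiliar with the masked-register convention this check relies on, here is a sketch under the assumption that _MASKED_BIT_ENABLE() places the write mask in the upper 16 bits, as elsewhere in i915; the macro and helper names below are made up for illustration.

	/* Standalone sketch of the masked-bit test used by is_inhibit_context(). */
	#include <stdbool.h>
	#include <stdint.h>

	#define SKETCH_MASKED_BIT_ENABLE(bit)	((((uint32_t)(bit)) << 16) | (bit))

	static bool sketch_is_inhibit(uint32_t ctx_ctrl, uint32_t bit)
	{
		uint32_t inhibit_mask = SKETCH_MASKED_BIT_ENABLE(bit);

		/* True only when both the mask bit and the value bit were written,
		 * i.e. the context image explicitly enabled restore-inhibit.
		 */
		return (ctx_ctrl & inhibit_mask) == inhibit_mask;
	}
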
+
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next,
@@ -300,9 +462,6 @@ static void switch_mmio(struct intel_vgpu *pre,
{
struct drm_i915_private *dev_priv;
struct intel_vgpu_submission *s;
- u32 *reg_state, ctx_ctrl;
- u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
struct engine_mmio *mmio;
u32 old_v, new_v;
@@ -310,10 +469,18 @@ static void switch_mmio(struct intel_vgpu *pre,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
switch_mocs(pre, next, ring_id);
- for (mmio = dev_priv->gvt->engine_mmio_list;
+ for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id)
continue;
+ /*
+		 * No need to save or restore the MMIO that lives in the context
+		 * state image on Kabylake; it is initialized by LRI commands and
+		 * saved/restored together with the context.
+ */
+ if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+ continue;
+
// save
if (pre) {
vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
@@ -327,16 +494,13 @@ static void switch_mmio(struct intel_vgpu *pre,
// restore
if (next) {
s = &next->submission;
- reg_state =
- s->shadow_ctx->engine[ring_id].lrc_reg_state;
- ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
/*
- * if it is an inhibit context, load in_context mmio
- * into HW by mmio write. If it is not, skip this mmio
- * write.
+		 * No need to restore MMIO that lives in the context state
+		 * image when this is not an inhibit context; the context
+		 * restores it by itself.
*/
if (mmio->in_context &&
- (ctx_ctrl & inhibit_mask) != inhibit_mask)
+ !is_inhibit_context(s->shadow_ctx, ring_id))
continue;
if (mmio->mask)
@@ -405,8 +569,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
*/
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
+ struct engine_mmio *mmio;
+
if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
- gvt->engine_mmio_list = gen9_engine_mmio_list;
+ gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
- gvt->engine_mmio_list = gen8_engine_mmio_list;
+ gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+
+ for (mmio = gvt->engine_mmio_list.mmio;
+ i915_mmio_reg_valid(mmio->reg); mmio++) {
+ if (mmio->in_context)
+ gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ }
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index ca2c6a745673..0439eb8057a8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+ struct i915_request *req);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 81aff4eacbfe..32ffcd566cdd 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -154,54 +154,31 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
}
/**
- * intel_gvt_hypervisor_enable - set a guest page to write-protected
+ * intel_gvt_hypervisor_enable_page_track - track a guest page
* @vgpu: a vGPU
- * @t: page track data structure
+ * @gfn: the gfn of the guest page
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_enable_page_track(
- struct intel_vgpu *vgpu,
- struct intel_vgpu_page_track *t)
+ struct intel_vgpu *vgpu, unsigned long gfn)
{
- int ret;
-
- if (t->tracked)
- return 0;
-
- ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
- if (ret)
- return ret;
- t->tracked = true;
- atomic_inc(&vgpu->gtt.n_tracked_guest_page);
- return 0;
+ return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}
/**
- * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
- * guest page
+ * intel_gvt_hypervisor_disable_page_track - untrack a guest page
* @vgpu: a vGPU
- * @t: page track data structure
+ * @gfn: the gfn of the guest page
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_disable_page_track(
- struct intel_vgpu *vgpu,
- struct intel_vgpu_page_track *t)
+ struct intel_vgpu *vgpu, unsigned long gfn)
{
- int ret;
-
- if (!t->tracked)
- return 0;
-
- ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
- if (ret)
- return ret;
- t->tracked = false;
- atomic_dec(&vgpu->gtt.n_tracked_guest_page);
- return 0;
+ return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}
/**
@@ -251,6 +228,34 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
}
/**
+ * intel_gvt_hypervisor_dma_map_guest_page - set up a DMA mapping for a guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of the guest page
+ * @dma_addr: on return, the allocated DMA address
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_dma_map_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr)
+{
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+ dma_addr);
+}
+
+/**
+ * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
+ * @vgpu: a vGPU
+ * @dma_addr: the mapped dma addr
+ */
+static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
+ struct intel_vgpu *vgpu, dma_addr_t dma_addr)
+{
+ intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
+}
+
+/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
new file mode 100644
index 000000000000..53e2bd79c97d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_find_page_track - find the page track record of a guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * A pointer to struct intel_vgpu_page_track if found, otherwise NULL.
+ */
+struct intel_vgpu_page_track *intel_vgpu_find_page_track(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ return radix_tree_lookup(&vgpu->page_track_tree, gfn);
+}
+
+/**
+ * intel_vgpu_register_page_track - register a guest page to be tracked
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ * @handler: the function called when the tracked page is written
+ * @priv: private data passed back to @handler
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
+ gvt_page_track_handler_t handler, void *priv)
+{
+ struct intel_vgpu_page_track *track;
+ int ret;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (track)
+ return -EEXIST;
+
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
+
+ track->handler = handler;
+ track->priv_data = priv;
+
+ ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
+ if (ret) {
+ kfree(track);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_vgpu_unregister_page_track - unregister the tracked guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ */
+void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
+ unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+
+ track = radix_tree_delete(&vgpu->page_track_tree, gfn);
+ if (track) {
+ if (track->tracked)
+ intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ kfree(track);
+ }
+}
+
+/**
+ * intel_vgpu_enable_page_track - set write-protection on guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+ int ret;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (!track)
+ return -ENXIO;
+
+ if (track->tracked)
+ return 0;
+
+ ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
+ if (ret)
+ return ret;
+ track->tracked = true;
+ return 0;
+}
+
+/**
+ * intel_vgpu_disable_page_track - cancel write-protection on guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+ int ret;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (!track)
+ return -ENXIO;
+
+ if (!track->tracked)
+ return 0;
+
+ ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ if (ret)
+ return ret;
+ track->tracked = false;
+ return 0;
+}
+
+/**
+ * intel_vgpu_page_track_handler - handle a write to a write-protected guest page
+ * @vgpu: a vGPU
+ * @gpa: the gpa of this write
+ * @data: the data being written
+ * @bytes: the length of this write
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
+ void *data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_page_track *page_track;
+ int ret = 0;
+
+ mutex_lock(&gvt->lock);
+
+ page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
+ if (!page_track) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (unlikely(vgpu->failsafe)) {
+		/* Remove write protection to prevent future traps. */
+ intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
+ } else {
+ ret = page_track->handler(page_track, gpa, data, bytes);
+ if (ret)
+ gvt_err("guest page write error, gpa %llx\n", gpa);
+ }
+
+out:
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
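
A minimal usage sketch of the page-track API added above; the caller, handler and data names are made up for illustration (the real consumer in this series is the GTT shadow-page code).

	/* Hypothetical caller of the new page_track API; not part of the patch. */
	static int my_write_handler(struct intel_vgpu_page_track *page_track,
				    u64 gpa, void *data, int bytes)
	{
		/* page_track->priv_data carries the pointer passed at registration */
		return 0;
	}

	static int my_track_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
				       void *priv)
	{
		int ret;

		ret = intel_vgpu_register_page_track(vgpu, gfn, my_write_handler, priv);
		if (ret)
			return ret;

		/* Arm write protection: guest writes to this gfn now reach
		 * my_write_handler() via intel_vgpu_page_track_handler().
		 */
		ret = intel_vgpu_enable_page_track(vgpu, gfn);
		if (ret)
			intel_vgpu_unregister_page_track(vgpu, gfn);
		return ret;
	}
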
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
new file mode 100644
index 000000000000..fa607a71c3c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _GVT_PAGE_TRACK_H_
+#define _GVT_PAGE_TRACK_H_
+
+struct intel_vgpu_page_track;
+
+typedef int (*gvt_page_track_handler_t)(
+ struct intel_vgpu_page_track *page_track,
+ u64 gpa, void *data, int bytes);
+
+/* Track record for a write-protected guest page. */
+struct intel_vgpu_page_track {
+ gvt_page_track_handler_t handler;
+ bool tracked;
+ void *priv_data;
+};
+
+struct intel_vgpu_page_track *intel_vgpu_find_page_track(
+ struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
+ unsigned long gfn, gvt_page_track_handler_t handler,
+ void *priv);
+void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
+ unsigned long gfn);
+
+int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
+int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
+ void *data, unsigned int bytes);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index cc1ce361cd76..75b7bc7b344c 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -103,9 +103,8 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
list_for_each(pos, &sched_data->lru_runq_head) {
vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
- fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
- vgpu_data->sched_ctl.weight /
- total_weight;
+ fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
+ total_weight) * vgpu_data->sched_ctl.weight;
vgpu_data->allocated_ts = fair_timeslice;
vgpu_data->left_ts = vgpu_data->allocated_ts;
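
A quick numeric check of the reordered fair_timeslice computation (illustrative values, assuming GVT_TS_BALANCE_PERIOD_MS is 100 ms): dividing the period by total_weight first and then scaling by the vGPU's weight yields essentially the same slice as the old expression, modulo integer rounding, while keeping the 64-bit division inside ktime_divns().

	/* Standalone arithmetic sketch; not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int64_t period_ns = 100LL * 1000000;	/* 100 ms balance period */
		const int64_t total_weight = 48;		/* e.g. three vGPUs of weight 16 */
		const int64_t weight = 16;

		int64_t old_slice = period_ns * weight / total_weight;	/* 33,333,333 ns */
		int64_t new_slice = period_ns / total_weight * weight;	/* 33,333,328 ns */

		printf("old %lld ns, new %lld ns\n",
		       (long long)old_slice, (long long)new_slice);
		return 0;
	}
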
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..9b92b4e25a20 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
#undef COPY_REG
set_context_pdp_root_pointer(shadow_ring_context,
- workload->shadow_mm->shadow_page_table);
+ (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return 0;
}
-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
{
return i915_gem_context_force_single_submission(req->ctx);
}
@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+ struct i915_request *req = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -225,6 +225,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
void *shadow_ring_buffer_va;
u32 *cs;
+ struct i915_request *req = workload->req;
+
+ if (IS_KABYLAKE(req->i915) &&
+ is_inhibit_context(req->ctx, req->engine->id))
+ intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
@@ -333,13 +338,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
- rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
@@ -348,7 +353,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
- workload->req = i915_gem_request_get(rq);
+ workload->req = i915_request_get(rq);
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
@@ -582,7 +587,7 @@ out:
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
- i915_add_request(workload->req);
+ i915_request_add(workload->req);
workload->dispatched = true;
}
@@ -769,7 +774,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_gem_request_put(fetch_and_zero(&workload->req));
+ i915_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
@@ -886,7 +891,7 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
- i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -1132,7 +1137,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &workload->vgpu->submission;
if (workload->shadow_mm)
- intel_gvt_mm_unreference(workload->shadow_mm);
+ intel_vgpu_mm_put(workload->shadow_mm);
kmem_cache_free(s->workloads, workload);
}
@@ -1181,32 +1186,27 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
- int page_table_level;
- u32 pdp[8];
-
- if (desc->addressing_mode == 1) { /* legacy 32-bit */
- page_table_level = 3;
- } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
- page_table_level = 4;
- } else {
+ intel_gvt_gtt_type_t root_entry_type;
+ u64 pdps[GVT_RING_CTX_NR_PDPS];
+
+ switch (desc->addressing_mode) {
+ case 1: /* legacy 32-bit */
+ root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ break;
+ case 3: /* legacy 64-bit */
+ root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ break;
+ default:
gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
- read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
- mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
- if (mm) {
- intel_gvt_mm_reference(mm);
- } else {
+ mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
- mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
- pdp, page_table_level, 0);
- if (IS_ERR(mm)) {
- gvt_vgpu_err("fail to create mm object.\n");
- return PTR_ERR(mm);
- }
- }
workload->shadow_mm = mm;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a98b19e..bab4097aa6d7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
- struct drm_i915_gem_request *req;
+ struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
bool shadowed;
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a2511538f34..fc7831a62121 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
);
TRACE_EVENT(gma_translate,
- TP_PROTO(int id, char *type, int ring_id, int pt_level,
+ TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
unsigned long gma, unsigned long gpa),
- TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+ TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
- "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
- id, type, ring_id, pt_level, gma, gpa);
+ "VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
+ id, type, ring_id, root_entry_type, gma, gpa);
),
TP_printk("%s", __entry->buf)
@@ -168,7 +168,7 @@ TRACE_EVENT(spt_change,
TP_printk("%s", __entry->buf)
);
-TRACE_EVENT(gpt_change,
+TRACE_EVENT(spt_guest_change,
TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
unsigned long index),
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index b87b19d8443c..41f76e86aa1f 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -354,6 +354,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+ INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3849ded354e3..89f7ff2c652e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -49,6 +49,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_device_info_dump_flags(info, &p);
intel_device_info_dump_runtime(info, &p);
+ intel_driver_caps_print(&dev_priv->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915_modparams, &p);
@@ -149,8 +150,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
- obj->base.read_domains,
- obj->base.write_domain,
+ obj->read_domains,
+ obj->write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -518,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
struct task_struct *task;
mutex_lock(&dev->struct_mutex);
@@ -535,7 +536,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
* Therefore, we need to protect this ->comm access using RCU.
*/
request = list_first_entry_or_null(&file_priv->mm.request_list,
- struct drm_i915_gem_request,
+ struct i915_request,
client_link);
rcu_read_lock();
task = pid_task(request && request->ctx->pid ?
@@ -645,6 +646,56 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
+static void gen8_display_interrupt_info(struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ int pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+ seq_printf(m, "Pipe %c IMR:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IMR(pipe)));
+ seq_printf(m, "Pipe %c IIR:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IIR(pipe)));
+ seq_printf(m, "Pipe %c IER:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+ intel_display_power_put(dev_priv, power_domain);
+ }
+
+ seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IMR));
+ seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IIR));
+ seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IER));
+
+ seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IMR));
+ seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IIR));
+ seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IER));
+
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
+}
+
static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -708,6 +759,27 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ seq_printf(m, "Master Interrupt Control: %08x\n",
+ I915_READ(GEN11_GFX_MSTR_IRQ));
+
+ seq_printf(m, "Render/Copy Intr Enable: %08x\n",
+ I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
+ seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
+ I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
+ seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
+ I915_READ(GEN11_GUC_SG_INTR_ENABLE));
+ seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
+ I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
+ seq_printf(m, "Crypto Intr Enable:\t %08x\n",
+ I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
+ seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
+ I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
+
+ seq_printf(m, "Display Interrupt Control:\t%08x\n",
+ I915_READ(GEN11_DISPLAY_INT_CTL));
+
+ gen8_display_interrupt_info(m);
} else if (INTEL_GEN(dev_priv) >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -721,49 +793,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
i, I915_READ(GEN8_GT_IER(i)));
}
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
- seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IMR(pipe)));
- seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IIR(pipe)));
- seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
- intel_display_power_put(dev_priv, power_domain);
- }
-
- seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IMR));
- seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IIR));
- seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IER));
-
- seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IMR));
- seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IIR));
- seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IER));
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
+ gen8_display_interrupt_info(m);
} else if (IS_VALLEYVIEW(dev_priv)) {
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
@@ -845,13 +875,35 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- if (INTEL_GEN(dev_priv) >= 6) {
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ seq_printf(m, "RCS Intr Mask:\t %08x\n",
+ I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
+ seq_printf(m, "BCS Intr Mask:\t %08x\n",
+ I915_READ(GEN11_BCS_RSVD_INTR_MASK));
+ seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
+ I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
+ seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
+ I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
+ seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
+ I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
+ seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
+ I915_READ(GEN11_GUC_SG_INTR_MASK));
+ seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
+ I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
+ seq_printf(m, "Crypto Intr Mask:\t %08x\n",
+ I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
+ seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
+ I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
+
+ } else if (INTEL_GEN(dev_priv) >= 6) {
for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
}
}
+
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1460,19 +1512,6 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
- unsigned forcewake_count;
- int count = 0;
-
- forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
- if (forcewake_count) {
- seq_puts(m, "RC information inaccurate because somebody "
- "holds a forcewake reference \n");
- } else {
- /* NB: we cannot use forcewake, else we read the wrong values */
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
- udelay(10);
- seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
- }
gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
@@ -1483,9 +1522,12 @@ static int gen6_drpc_info(struct seq_file *m)
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- mutex_unlock(&dev_priv->pcu_lock);
+ if (INTEL_GEN(dev_priv) <= 7) {
+ mutex_lock(&dev_priv->pcu_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids);
+ mutex_unlock(&dev_priv->pcu_lock);
+ }
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1541,12 +1583,15 @@ static int gen6_drpc_info(struct seq_file *m)
print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ if (INTEL_GEN(dev_priv) <= 7) {
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ }
+
return i915_forcewake_domains(m, NULL);
}
@@ -1599,7 +1644,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
if (fbc->work.scheduled)
- seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
+ seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
fbc->work.scheduled_vblank,
drm_crtc_vblank_count(&fbc->crtc->base));
@@ -2338,7 +2383,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
return -ENODEV;
GEM_BUG_ON(!guc->execbuf_client);
- GEM_BUG_ON(!guc->preempt_client);
seq_printf(m, "Doorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
@@ -2346,8 +2390,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
- seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
- i915_guc_client_info(m, dev_priv, guc->preempt_client);
+ if (guc->preempt_client) {
+ seq_printf(m, "\nGuC preempt client @ %p:\n",
+ guc->preempt_client);
+ i915_guc_client_info(m, dev_priv, guc->preempt_client);
+ }
i915_guc_log_info(m, dev_priv);
@@ -3154,6 +3201,16 @@ static int i915_engine_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_rcs_topology(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+
+ return 0;
+}
+
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -3930,7 +3987,8 @@ i915_wedged_set(void *data, u64 val)
engine->hangcheck.stalled = true;
}
- i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
+ i915_handle_error(i915, val, "Manually set wedged engine mask = %llx",
+ val);
wait_on_bit(&i915->gpu_error.flags,
I915_RESET_HANDOFF,
@@ -4064,7 +4122,7 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED);
if (val & DROP_RETIRE)
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
@@ -4083,10 +4141,8 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_IDLE)
drain_delayed_work(&dev_priv->gt.idle_work);
- if (val & DROP_FREED) {
- synchronize_rcu();
+ if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
- }
return ret;
}
@@ -4277,7 +4333,7 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
continue;
sseu->slice_mask = BIT(0);
- sseu->subslice_mask |= BIT(ss);
+ sseu->subslice_mask[0] |= BIT(ss);
eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
@@ -4292,11 +4348,11 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
const struct intel_device_info *info = INTEL_INFO(dev_priv);
- int s_max = 6, ss_max = 4;
int s, ss;
- u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];
+ u32 s_reg[info->sseu.max_slices];
+ u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < info->sseu.max_slices; s++) {
/*
* FIXME: Valid SS Mask respects the spec and read
* only valid bits for those registers, excluding reserverd
@@ -4318,15 +4374,15 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
GEN9_PGCTL_SSB_EU210_ACK |
GEN9_PGCTL_SSB_EU311_ACK;
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < info->sseu.max_slices; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
sseu->slice_mask |= BIT(s);
- sseu->subslice_mask = info->sseu.subslice_mask;
+ sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
- for (ss = 0; ss < ss_max; ss++) {
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
@@ -4346,17 +4402,12 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
- int s_max = 3, ss_max = 4;
+ const struct intel_device_info *info = INTEL_INFO(dev_priv);
int s, ss;
- u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
-
- /* BXT has a single slice and at most 3 subslices. */
- if (IS_GEN9_LP(dev_priv)) {
- s_max = 1;
- ss_max = 3;
- }
+ u32 s_reg[info->sseu.max_slices];
+ u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < info->sseu.max_slices; s++) {
s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
@@ -4371,7 +4422,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
GEN9_PGCTL_SSB_EU210_ACK |
GEN9_PGCTL_SSB_EU311_ACK;
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < info->sseu.max_slices; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
@@ -4379,10 +4430,10 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv))
- sseu->subslice_mask =
- INTEL_INFO(dev_priv)->sseu.subslice_mask;
+ sseu->subslice_mask[s] =
+ INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
- for (ss = 0; ss < ss_max; ss++) {
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
if (IS_GEN9_LP(dev_priv)) {
@@ -4390,7 +4441,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
/* skip disabled subslice */
continue;
- sseu->subslice_mask |= BIT(ss);
+ sseu->subslice_mask[s] |= BIT(ss);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
@@ -4412,9 +4463,12 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
sseu->eu_per_subslice =
INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ sseu->subslice_mask[s] =
+ INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+ }
sseu->eu_total = sseu->eu_per_subslice *
sseu_subslice_total(sseu);
@@ -4433,6 +4487,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const char *type = is_available_info ? "Available" : "Enabled";
+ int s;
seq_printf(m, " %s Slice Mask: %04x\n", type,
sseu->slice_mask);
@@ -4440,10 +4495,10 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
sseu_subslice_total(sseu));
- seq_printf(m, " %s Subslice Mask: %04x\n", type,
- sseu->subslice_mask);
- seq_printf(m, " %s Subslice Per Slice: %u\n", type,
- hweight8(sseu->subslice_mask));
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, hweight8(sseu->subslice_mask[s]));
+ }
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
seq_printf(m, " %s EU Per Subslice: %u\n", type,
@@ -4477,6 +4532,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
+ sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
+ sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+ sseu.max_eus_per_subslice =
+ INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
intel_runtime_pm_get(dev_priv);
@@ -4684,6 +4743,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
+ {"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9f1daf258fe..d7c4de45644d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
+#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
@@ -122,10 +123,90 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev_priv));
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ /* PantherPoint is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
+ return PCH_KBP;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+ WARN_ON(!IS_ICELAKE(dev_priv));
+ return PCH_ICP;
+ default:
+ return PCH_NONE;
+ }
+}
-static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
{
- enum intel_pch ret = PCH_NOP;
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static unsigned short
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+{
+ unsigned short id = 0;
/*
* In a virtualized passthrough environment we can be in a
@@ -134,28 +215,25 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev_priv)) {
- ret = PCH_IBX;
- DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- ret = PCH_CPT;
- DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- ret = PCH_LPT;
- if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
- dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else
- dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- ret = PCH_SPT;
- DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
- } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- ret = PCH_CNP;
- DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
- }
+ if (IS_GEN5(dev_priv))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+ else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+
+ if (id)
+ DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+ else
+ DRM_DEBUG_KMS("Assuming no PCH\n");
- return ret;
+ return id;
}
static void intel_detect_pch(struct drm_i915_private *dev_priv)
@@ -183,102 +261,32 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
*/
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
unsigned short id;
+ enum intel_pch pch_type;
if (pch->vendor != PCI_VENDOR_ID_INTEL)
continue;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
- dev_priv->pch_id = id;
-
- if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_IBX;
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev_priv));
- } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CPT;
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) &&
- !IS_IVYBRIDGE(dev_priv));
- } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
- /* PantherPoint is CPT compatible */
- dev_priv->pch_type = PCH_CPT;
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) &&
- !IS_IVYBRIDGE(dev_priv));
- } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) ||
- IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) &&
- !IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
- /* WildcatPoint is LPT compatible */
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) ||
- IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
- /* WildcatPoint is LPT compatible */
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) &&
- !IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_SPT;
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv));
- } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_SPT;
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv));
- } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_KBP;
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- } else if (id == INTEL_PCH_ICP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_ICP;
- DRM_DEBUG_KMS("Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
- } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- pch->subsystem_vendor ==
- PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- pch->subsystem_device ==
- PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type = intel_virt_detect_pch(dev_priv);
- } else {
- continue;
+ pch_type = intel_pch_type(dev_priv, id);
+ if (pch_type != PCH_NONE) {
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ id = intel_virt_detect_pch(dev_priv);
+ if (id) {
+ pch_type = intel_pch_type(dev_priv, id);
+ if (WARN_ON(pch_type == PCH_NONE))
+ pch_type = PCH_NOP;
+ } else {
+ pch_type = PCH_NOP;
+ }
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
}
-
- break;
}
if (!pch)
DRM_DEBUG_KMS("No PCH found.\n");
@@ -286,8 +294,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
pci_dev_put(pch);
}
-static int i915_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -381,13 +389,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = i915_gem_mmap_gtt_version();
break;
case I915_PARAM_HAS_SCHEDULER:
- value = 0;
- if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
- value |= I915_SCHEDULER_CAP_ENABLED;
- value |= I915_SCHEDULER_CAP_PRIORITY;
- if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
- value |= I915_SCHEDULER_CAP_PREEMPTION;
- }
+ value = dev_priv->caps.scheduler;
break;
case I915_PARAM_MMAP_VERSION:
@@ -427,7 +429,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+ value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
if (!value)
return -ENODEV;
break;
@@ -807,7 +809,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
/*
* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
- * by the GPU. i915_gem_retire_requests() is called directly when we
+ * by the GPU. i915_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
@@ -879,6 +881,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
+ * @ent: the matching pci_device_id
*
* Initialize everything that is a "SW-only" state, that is state not
* requiring accessing the device or exposing the driver via kernel internal
@@ -904,11 +907,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
sizeof(device_info->platform_mask) * BITS_PER_BYTE);
- device_info->platform_mask = BIT(device_info->platform);
-
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
- device_info->gen_mask = BIT(device_info->gen - 1);
-
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
@@ -1446,19 +1445,7 @@ void i915_driver_unload(struct drm_device *dev)
intel_modeset_cleanup(dev);
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
- kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ intel_bios_cleanup(dev_priv);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
@@ -1925,7 +1912,6 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
ret = i915_gem_reset_prepare(i915);
if (ret) {
dev_err(i915->drm.dev, "GPU recovery failed\n");
- intel_gpu_reset(i915, ALL_ENGINES);
goto taint;
}
@@ -1957,7 +1943,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
*/
ret = i915_ggtt_enable_hw(i915);
if (ret) {
- DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+ DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
+ ret);
goto error;
}
@@ -1974,7 +1961,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
*/
ret = i915_gem_init_hw(i915);
if (ret) {
- DRM_ERROR("Failed hw init on reset %d\n", ret);
+ DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+ ret);
goto error;
}
@@ -2005,7 +1993,8 @@ taint:
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
+ intel_gpu_reset(i915, ALL_ENGINES);
goto finish;
}
@@ -2031,7 +2020,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
- struct drm_i915_gem_request *active_request;
+ struct i915_request *active_request;
int ret;
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
@@ -2587,7 +2576,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_guc_suspend(dev_priv);
+ intel_uc_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
@@ -2609,7 +2598,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_guc_resume(dev_priv);
+ intel_uc_resume(dev_priv);
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
@@ -2695,7 +2684,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_guc_resume(dev_priv);
+ intel_uc_resume(dev_priv);
/*
* No point of rolling back things in case of an error, as the best
@@ -2795,7 +2784,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -2807,8 +2796,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2827,11 +2816,11 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
@@ -2844,6 +2833,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ad1fc845cd1b..6e740f6fe33f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -71,9 +71,9 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_request.h"
#include "i915_vma.h"
#include "intel_gvt.h"
@@ -83,8 +83,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180207"
-#define DRIVER_TIMESTAMP 1517988364
+#define DRIVER_DATE "20180308"
+#define DRIVER_TIMESTAMP 1520513379
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -472,6 +472,7 @@ struct i915_gpu_state {
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
+ struct intel_driver_caps driver_caps;
struct i915_params params;
struct i915_error_uc {
@@ -666,6 +667,7 @@ struct intel_fbc {
*/
struct intel_fbc_state_cache {
struct i915_vma *vma;
+ unsigned long flags;
struct {
unsigned int mode_flags;
@@ -704,6 +706,7 @@ struct intel_fbc {
*/
struct intel_fbc_reg_params {
struct i915_vma *vma;
+ unsigned long flags;
struct {
enum pipe pipe;
@@ -722,7 +725,7 @@ struct intel_fbc {
struct intel_fbc_work {
bool scheduled;
- u32 scheduled_vblank;
+ u64 scheduled_vblank;
struct work_struct work;
} work;
@@ -946,6 +949,8 @@ struct intel_rps {
struct intel_rc6 {
bool enabled;
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
};
struct intel_llc_pstate {
@@ -1092,6 +1097,11 @@ struct i915_gem_mm {
struct llist_head free_list;
struct work_struct free_work;
spinlock_t free_lock;
+ /**
+ * Count of objects pending destruction. Used to skip needlessly
+ * waiting on an RCU barrier if no objects are waiting to be freed.
+ */
+ atomic_t free_count;
/**
* Small stash of WC pages
@@ -1221,7 +1231,7 @@ struct i915_gpu_error {
*
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_gem_request_alloc(), this bit is checked and the sequence
+ * i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
*/
unsigned long flags;
@@ -1356,6 +1366,7 @@ struct intel_vbt_data {
u32 size;
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
} dsi;
int crt_ddc_pin;
@@ -1815,6 +1826,7 @@ struct drm_i915_private {
struct kmem_cache *priorities;
const struct intel_device_info info;
+ struct intel_driver_caps caps;
/**
* Data Stolen Memory - aka "i915 stolen memory" gives us the start and
@@ -2091,6 +2103,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
u32 fdi_rx_config;
@@ -2419,12 +2432,16 @@ enum hdmi_force_audio {
* We have one bit per pipe and per scanout plane type.
*/
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER(pipe, plane_id) \
- (1 << ((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
+ BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
+ BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
+})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
- (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+ BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
- (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+ GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/*
* Optimised SGL iterator for GEM objects
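For reference, a standalone sketch of how the per-pipe frontbuffer masks above expand, assuming the 8-bits-per-pipe layout shown in the hunk; the BIT()/GENMASK() macros below are simplified userspace stand-ins for the kernel helpers, not the <linux/bits.h> definitions:

#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)          (1U << (n))
#define GENMASK(h, l)   ((~0U >> (31 - (h))) & ~((1U << (l)) - 1))

#define FB_BITS_PER_PIPE        8
#define FB_BIT(pipe, plane)     BIT((plane) + FB_BITS_PER_PIPE * (pipe))
#define FB_ALL_MASK(pipe)       GENMASK(FB_BITS_PER_PIPE * ((pipe) + 1) - 1, \
                                        FB_BITS_PER_PIPE * (pipe))

int main(void)
{
        /* pipe 1, plane 2 lands on bit 10; all of pipe 1 is bits 15..8 */
        printf("plane bit: 0x%08x\n", FB_BIT(1, 2));      /* 0x00000400 */
        printf("pipe mask: 0x%08x\n", FB_ALL_MASK(1));    /* 0x0000ff00 */
        return 0;
}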
@@ -2730,6 +2747,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define BLT_RING ENGINE_MASK(BCS)
#define VEBOX_RING ENGINE_MASK(VECS)
#define BSD2_RING ENGINE_MASK(VCS2)
+#define BSD3_RING ENGINE_MASK(VCS3)
+#define BSD4_RING ENGINE_MASK(VCS4)
+#define VEBOX2_RING ENGINE_MASK(VECS2)
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
@@ -2752,6 +2772,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
((dev_priv)->info.has_logical_ring_preemption)
@@ -2772,9 +2794,10 @@ intel_info(const struct drm_i915_private *dev_priv)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
-/* WaRsDisableCoarsePowerGating:skl,bxt */
+/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+ (IS_CANNONLAKE(dev_priv) || \
+ IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2799,7 +2822,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
@@ -2862,19 +2885,20 @@ intel_info(const struct drm_i915_private *dev_priv)
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_CNP_LP(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
- (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
- (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
@@ -3081,10 +3105,10 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -3128,6 +3152,9 @@ void i915_gem_free_object(struct drm_gem_object *obj);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
+ if (!atomic_read(&i915->mm.free_count))
+ return;
+
/* A single pass should suffice to release all the freed objects (along
* most call paths), but be a little more paranoid in that freeing
* the objects does take a little amount of time, during which the rcu
@@ -3309,7 +3336,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -3324,11 +3351,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -3360,7 +3385,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return READ_ONCE(error->reset_engine_count[engine->id]);
}
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
@@ -3369,7 +3394,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request);
+ struct i915_request *request);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3399,7 +3424,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- const struct i915_ggtt_view *view);
+ const struct i915_ggtt_view *view,
+ unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
@@ -3675,6 +3701,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -3986,9 +4013,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
static inline bool
-__i915_request_irq_complete(const struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 seqno;
/* Note that the engine may have wrapped around the seqno, and
@@ -3997,7 +4024,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* this by kicking all the waiters before resetting the seqno
* in hardware, and also signal the fence.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
return true;
/* The request was dequeued before we were awoken. We check after
@@ -4006,14 +4033,14 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* the request execution are sufficient to ensure that a check
* after reading the value from hw matches this request.
*/
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (__i915_gem_request_completed(req, seqno))
+ if (__i915_request_completed(rq, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -4062,7 +4089,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
wake_up_process(b->irq_wait->tsk);
spin_unlock_irq(&b->irq_lock);
- if (__i915_gem_request_completed(req, seqno))
+ if (__i915_request_completed(rq, seqno))
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1632f18e6a64..a5bd07338b46 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -240,8 +240,8 @@ err_phys:
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
if (needs_clflush &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
@@ -353,7 +353,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
long timeout,
struct intel_rps_client *rps_client)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
@@ -366,7 +366,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
timeout);
rq = to_request(fence);
- if (i915_gem_request_completed(rq))
+ if (i915_request_completed(rq))
goto out;
/*
@@ -385,16 +385,16 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (rps_client && !i915_gem_request_started(rq)) {
+ if (rps_client && !i915_request_started(rq)) {
if (INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq, rps_client);
}
- timeout = i915_wait_request(rq, flags, timeout);
+ timeout = i915_request_wait(rq, flags, timeout);
out:
- if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
- i915_gem_request_retire_upto(rq);
+ if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
+ i915_request_retire_upto(rq);
return timeout;
}
@@ -463,7 +463,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_engine_cs *engine;
if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
@@ -703,10 +703,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
- if (!(obj->base.write_domain & flush_domains))
+ if (!(obj->write_domain & flush_domains))
return;
- switch (obj->base.write_domain) {
+ switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_flush_ggtt_writes(dev_priv);
@@ -731,7 +731,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
break;
}
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
static inline int
@@ -831,7 +831,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* anyway again before the next pread happens.
*/
if (!obj->cache_dirty &&
- !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
@@ -890,7 +890,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
@@ -2391,8 +2391,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -2856,10 +2856,10 @@ static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
atomic_inc(&ctx->active_count);
}
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request, *active = NULL;
+ struct i915_request *request, *active = NULL;
unsigned long flags;
/* We are called by the error capture and reset at a random
@@ -2872,8 +2872,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link) {
- if (__i915_gem_request_completed(request,
- request->global_seqno))
+ if (__i915_request_completed(request, request->global_seqno))
continue;
GEM_BUG_ON(request->engine != engine);
@@ -2906,10 +2905,10 @@ static bool engine_stalled(struct intel_engine_cs *engine)
* Ensure irq handler finishes, and not run again.
* Also return the active request so that we only search for it once.
*/
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request = NULL;
+ struct i915_request *request = NULL;
/*
* During the reset sequence, we must prevent the engine from
@@ -2967,7 +2966,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
enum intel_engine_id id;
int err = 0;
@@ -2986,7 +2985,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
return err;
}
-static void skip_request(struct drm_i915_gem_request *request)
+static void skip_request(struct i915_request *request)
{
void *vaddr = request->ring->vaddr;
u32 head;
@@ -3005,7 +3004,7 @@ static void skip_request(struct drm_i915_gem_request *request)
dma_fence_set_error(&request->fence, -EIO);
}
-static void engine_skip_context(struct drm_i915_gem_request *request)
+static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_context *hung_ctx = request->ctx;
@@ -3029,9 +3028,9 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
}
/* Returns the request if it was guilty of the hang */
-static struct drm_i915_gem_request *
+static struct i915_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/* The guilty request will get skipped on a hung engine.
*
@@ -3085,7 +3084,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/*
* Make sure this write is visible before we re-enable the interrupt
@@ -3113,7 +3112,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
@@ -3134,12 +3133,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
* empty request appears sufficient to paper over the glitch.
*/
if (intel_engine_is_idle(engine)) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
- rq = i915_gem_request_alloc(engine,
- dev_priv->kernel_context);
+ rq = i915_request_alloc(engine,
+ dev_priv->kernel_context);
if (!IS_ERR(rq))
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
}
}
@@ -3174,21 +3173,21 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
}
}
-static void nop_submit_request(struct drm_i915_gem_request *request)
+static void nop_submit_request(struct i915_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
- i915_gem_request_submit(request);
+ i915_request_submit(request);
}
-static void nop_complete_submit_request(struct drm_i915_gem_request *request)
+static void nop_complete_submit_request(struct i915_request *request)
{
unsigned long flags;
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline->lock, flags);
- __i915_gem_request_submit(request);
+ __i915_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
@@ -3205,13 +3204,18 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
+ set_bit(I915_WEDGED, &i915->gpu_error.flags);
+ smp_mb__after_atomic();
+
/*
* First, stop submission to hw, but do not yet complete requests by
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ i915_gem_reset_prepare_engine(engine);
engine->submit_request = nop_submit_request;
+ }
/*
* Make sure no one is running the old callback before we proceed with
@@ -3229,8 +3233,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* start to complete all requests.
*/
engine->submit_request = nop_complete_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
+
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
@@ -3241,7 +3248,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
unsigned long flags;
- /* Mark all pending requests as complete so that any concurrent
+ /*
+ * Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
@@ -3249,9 +3257,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_init_global_seqno(engine,
intel_engine_last_submit(engine));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+ i915_gem_reset_finish_engine(engine);
}
- set_bit(I915_WEDGED, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
}
@@ -3275,7 +3284,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
*/
list_for_each_entry(tl, &i915->gt.timelines, link) {
for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
rq = i915_gem_active_peek(&tl->engine[i].last_request,
&i915->drm.struct_mutex);
@@ -3324,7 +3333,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (mutex_trylock(&dev->struct_mutex)) {
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
@@ -3412,25 +3421,22 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
unsigned int epoch = I915_EPOCH_INVALID;
bool rearm_hangcheck;
- ktime_t end;
if (!READ_ONCE(dev_priv->gt.awake))
return;
/*
* Wait for last execlists context complete, but bail out in case a
- * new request is submitted.
+ * new request is submitted. As we don't trust the hardware, we
+ * continue on if the wait times out. This is necessary to allow
+ * the machine to suspend even if the hardware dies, and we will
+ * try to recover in resume (after depriving the hardware of power,
+ * it may be in a better mood).
*/
- end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
- do {
- if (new_requests_since_last_retire(dev_priv))
- return;
-
- if (intel_engines_are_idle(dev_priv))
- break;
-
- usleep_range(100, 500);
- } while (ktime_before(ktime_get(), end));
+ __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
+ intel_engines_are_idle(dev_priv),
+ I915_IDLE_ENGINES_TIMEOUT * 1000,
+ 10, 500);
rearm_hangcheck =
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
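The open-coded ktime loop above is replaced by the driver's __wait_for() helper. As a rough illustration of the general poll-until-condition-or-timeout shape (not the macro's actual implementation), a minimal userspace sketch with stand-in callbacks:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Poll cond() until it holds or timeout_us elapses, running an optional
 * early-abort check each iteration and backing off between polls. */
static bool wait_for_cond(bool (*cond)(void), bool (*abort_now)(void),
                          long timeout_us, useconds_t backoff_us)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (abort_now && abort_now())
                        return false;           /* new work arrived: bail */
                if (cond())
                        return true;            /* condition met */

                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000000L +
                    (now.tv_nsec - start.tv_nsec) / 1000L > timeout_us)
                        return false;           /* timed out: carry on anyway */

                usleep(backoff_us);
        }
}

static int polls;
static bool idle(void) { return ++polls > 3; }  /* pretend hardware goes idle */

int main(void)
{
        printf("idle after %s\n",
               wait_for_cond(idle, NULL, 200000, 100) ? "wait" : "timeout");
        return 0;
}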
@@ -3678,7 +3684,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
if (ret)
return ret;
}
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
ret = wait_for_engines(i915);
} else {
@@ -3697,7 +3703,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3734,7 +3740,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3755,17 +3761,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* WC domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_WC;
- obj->base.write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
obj->mm.dirty = true;
}
@@ -3797,7 +3803,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3818,17 +3824,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
}
@@ -4072,7 +4078,8 @@ out:
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- const struct i915_ggtt_view *view)
+ const struct i915_ggtt_view *view,
+ unsigned int flags)
{
struct i915_vma *vma;
int ret;
@@ -4109,25 +4116,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
- if (!view || view->type == I915_GGTT_VIEW_NORMAL)
+ if ((flags & PIN_MAPPABLE) == 0 &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
- PIN_MAPPABLE | PIN_NONBLOCK);
- if (IS_ERR(vma)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned int flags;
-
- /* Valleyview is definitely limited to scanning out the first
- * 512MiB. Lets presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- flags = 0;
- if (HAS_GMCH_DISPLAY(i915))
- flags = PIN_MAPPABLE;
+ flags |
+ PIN_MAPPABLE |
+ PIN_NONBLOCK);
+ if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
- }
if (IS_ERR(vma))
goto err_unpin_global;
@@ -4140,7 +4136,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
@@ -4193,15 +4189,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -4228,7 +4224,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
- struct drm_i915_gem_request *request, *target = NULL;
+ struct i915_request *request, *target = NULL;
long ret;
/* ABI: return -EIO if already wedged */
@@ -4248,16 +4244,16 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
if (target)
- i915_gem_request_get(target);
+ i915_request_get(target);
spin_unlock(&file_priv->mm.lock);
if (target == NULL)
return 0;
- ret = i915_wait_request(target,
+ ret = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(target);
+ i915_request_put(target);
return ret < 0 ? ret : 0;
}
@@ -4276,7 +4272,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- if (!view && flags & PIN_MAPPABLE) {
+ if (flags & PIN_MAPPABLE &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
@@ -4370,7 +4367,7 @@ static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
/* We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
@@ -4383,8 +4380,8 @@ __busy_set_if_active(const struct dma_fence *fence,
return 0;
/* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, struct drm_i915_gem_request, fence);
- if (i915_gem_request_completed(rq))
+ rq = container_of(fence, struct i915_request, fence);
+ if (i915_request_completed(rq))
return 0;
return flag(rq->engine->uabi_id);
@@ -4529,8 +4526,7 @@ out:
}
static void
-frontbuffer_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
{
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
@@ -4637,8 +4633,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
i915_gem_object_init(obj, &i915_gem_object_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
@@ -4752,6 +4748,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
kfree(obj->bit_17);
i915_gem_object_free(obj);
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+
if (on)
cond_resched();
}
@@ -4840,6 +4839,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* i915_gem_busy_ioctl(). For the corresponding synchronized
* lookup see i915_gem_object_lookup_rcu().
*/
+ atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
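The new mm.free_count above follows a simple pattern: count an object as pending before handing it to call_rcu(), drop the count once it is really freed, and let the drain path return early when nothing is pending. A minimal userspace analogue of that bookkeeping (C11 atomics standing in for atomic_t, no RCU involved):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint free_count;

static void queue_free(void)
{
        atomic_fetch_add(&free_count, 1);
        /* ...the real code hands the object to call_rcu() here... */
}

static void reap_one(void)
{
        /* ...the object is actually released here... */
        atomic_fetch_sub(&free_count, 1);
}

static void drain(void)
{
        if (!atomic_load(&free_count)) {
                puts("nothing pending - skip the barrier");
                return;
        }
        puts("objects pending - wait for the reaper");
        /* the real code loops over rcu_barrier() and flushes the worker */
}

int main(void)
{
        drain();        /* skips */
        queue_free();
        drain();        /* waits */
        reap_one();
        drain();        /* skips again */
        return 0;
}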
@@ -4882,10 +4882,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- if (INTEL_GEN(i915) >= 5) {
- int reset = intel_gpu_reset(i915, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
+ if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
+ WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
}
int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -4922,7 +4920,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
- intel_guc_suspend(dev_priv);
+ intel_uc_suspend(dev_priv);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
@@ -4989,7 +4987,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
if (i915_gem_init_hw(i915))
goto err_wedged;
- intel_guc_resume(i915);
+ intel_uc_resume(i915);
/* Always reload a context for powersaving. */
if (i915_gem_switch_to_kernel_context(i915))
@@ -5065,8 +5063,11 @@ static int __i915_gem_restart_engines(void *data)
for_each_engine(engine, i915, id) {
err = engine->init_hw(engine);
- if (err)
+ if (err) {
+ DRM_ERROR("Failed to restart %s (%d)\n",
+ engine->name, err);
return err;
+ }
}
return 0;
@@ -5118,14 +5119,16 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
- DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
- if (ret)
+ if (ret) {
+ DRM_ERROR("Enabling uc failed (%d)\n", ret);
goto out;
+ }
intel_mocs_init_l3cc_table(dev_priv);
@@ -5157,9 +5160,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
return PTR_ERR(ctx);
for_each_engine(engine, i915, id) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ctx;
@@ -5169,7 +5172,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (engine->init_context)
err = engine->init_context(rq);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
if (err)
goto err_active;
}
@@ -5415,10 +5418,10 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
int i;
- if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+ if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev_priv)->gen >= 4 ||
+ else if (INTEL_GEN(dev_priv) >= 4 ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
@@ -5475,7 +5478,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (!dev_priv->luts)
goto err_vmas;
- dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ dev_priv->requests = KMEM_CACHE(i915_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
@@ -5537,7 +5540,8 @@ err_out:
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+ GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
+ GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5607,7 +5611,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
@@ -5693,7 +5697,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
if (IS_ERR(obj))
return obj;
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
offset = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e920dab7f1b8..f54c4ff74ded 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -29,7 +29,10 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
- printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \
+ pr_err("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
BUG(); \
} \
} while(0)
@@ -54,6 +57,6 @@
#define GEM_TRACE(...) do { } while (0)
#endif
-#define I915_NUM_ENGINES 5
+#define I915_NUM_ENGINES 8
#endif /* __I915_GEM_H__ */
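The reworked GEM_BUG_ON() above reports the function, line and stringified condition before hitting BUG(). A userspace sketch of the same assertion shape, with #cond standing in for the kernel's __stringify():

#include <stdio.h>
#include <stdlib.h>

/* Report where the check failed and what the check was, then abort. */
#define MY_BUG_ON(cond) do {                                            \
        if (cond) {                                                     \
                fprintf(stderr, "%s:%d BUG_ON(%s)\n",                   \
                        __func__, __LINE__, #cond);                     \
                abort();                                                \
        }                                                               \
} while (0)

int main(void)
{
        int refs = 1;

        MY_BUG_ON(refs < 0);    /* passes silently */
        refs = -1;
        MY_BUG_ON(refs < 0);    /* prints "main:NN BUG_ON(refs < 0)" and aborts */
        return 0;
}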
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index c93005c2e0fb..d3cbe8432f48 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (!reservation_object_test_signaled_rcu(resv, true))
break;
- i915_gem_retire_requests(pool->engine->i915);
+ i915_retire_requests(pool->engine->i915);
GEM_BUG_ON(i915_gem_object_is_active(obj));
/*
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index b9b53ac14176..f5c570d35b2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -177,7 +177,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
} else if (obj->mm.pages) {
__i915_do_clflush(obj);
} else {
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
obj->cache_dirty = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..f2cbea7cf940 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -211,17 +211,23 @@ static void context_close(struct i915_gem_context *ctx)
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
+ unsigned int max;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ max = GEN11_MAX_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
- 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ 0, max, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
- 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ 0, max, GFP_KERNEL);
if (ret < 0)
return ret;
}
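assign_hw_id() above keeps the existing allocate, retire, retry-once flow and only widens the id range on gen11+. A toy sketch of that flow, with stand-in allocator and reclaim steps in place of ida_simple_get() and i915_retire_requests():

#include <stdio.h>

static int try_alloc(int *pool)
{
        return *pool > 0 ? --(*pool) : -1;      /* -1 plays the role of -ENOSPC */
}

static void reclaim(int *pool)
{
        *pool += 2;                             /* retiring frees some ids */
}

static int alloc_id(int *pool)
{
        int id = try_alloc(pool);

        if (id < 0) {
                reclaim(pool);                  /* i915_retire_requests() in the hunk */
                id = try_alloc(pool);           /* second and final attempt */
        }
        return id;
}

int main(void)
{
        int pool = 0;

        printf("got id %d\n", alloc_id(&pool)); /* succeeds only via reclaim */
        return 0;
}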
@@ -338,11 +344,6 @@ static void __destroy_hw_context(struct i915_gem_context *ctx,
context_close(ctx);
}
-/**
- * The default context needs to exist per ring that uses contexts. It stores the
- * context state of the GPU for applications that don't utilize HW contexts, as
- * well as an idle case.
- */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
@@ -449,12 +450,18 @@ destroy_kernel_context(struct i915_gem_context **ctxp)
i915_gem_context_free(ctx);
}
+static bool needs_preempt_context(struct drm_i915_private *i915)
+{
+ return HAS_LOGICAL_RING_PREEMPTION(i915);
+}
+
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int err;
+ /* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
+ GEM_BUG_ON(dev_priv->preempt_context);
INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -462,14 +469,14 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+ BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
/* lowest priority; idle task */
ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
- err = PTR_ERR(ctx);
- goto err;
+ return PTR_ERR(ctx);
}
/*
* For easy recognisability, we want the kernel context to be 0 and then
@@ -479,23 +486,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
- ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
- if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default preempt context\n");
- err = PTR_ERR(ctx);
- goto err_kernel_context;
+ if (needs_preempt_context(dev_priv)) {
+ ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
+ if (!IS_ERR(ctx))
+ dev_priv->preempt_context = ctx;
+ else
+ DRM_ERROR("Failed to create preempt context; disabling preemption\n");
}
- dev_priv->preempt_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
dev_priv->engine[RCS]->context_size ? "logical" :
"fake");
return 0;
-
-err_kernel_context:
- destroy_kernel_context(&dev_priv->kernel_context);
-err:
- return err;
}
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -521,7 +523,8 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
- destroy_kernel_context(&i915->preempt_context);
+ if (i915->preempt_context)
+ destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
@@ -594,28 +597,28 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
if (engine_has_idle_kernel_context(engine))
continue;
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
}
@@ -627,7 +630,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
* but an extra layer of paranoia before we declare the system
* idle (on suspend etc) is advisable!
*/
- __i915_add_request(req, true);
+ __i915_request_add(rq, true);
}
return 0;
@@ -803,11 +806,11 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_PRIORITY:
{
- int priority = args->value;
+ s64 priority = args->value;
if (args->size)
ret = -EINVAL;
- else if (!to_i915(dev)->engine[RCS]->schedule)
+ else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
ret = -ENODEV;
else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
priority < I915_CONTEXT_MIN_USER_PRIORITY)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4bfb72f8e1cb..7854262ddfd9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -29,6 +29,8 @@
#include <linux/list.h>
#include <linux/radix-tree.h>
+#include "i915_gem.h"
+
struct pid;
struct drm_device;
@@ -37,6 +39,7 @@ struct drm_file;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
+struct i915_request;
struct i915_vma;
struct intel_ring;
@@ -273,7 +276,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
-int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_release(struct kref *ctx_ref);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 864439a214c8..69a7aec49e84 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -330,8 +330,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
* write-combined buffer or a delay through the chipset for GTT
* writes that do require us to treat GTT as a separate cache domain.)
*/
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
return &obj->base;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 60ca4f05ae94..54814a196ee4 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -168,7 +168,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
else
phases[1] = NULL;
@@ -293,7 +293,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
- i915_gem_retire_requests(vm->i915);
+ i915_retire_requests(vm->i915);
check_color = vm->mm.color_adjust;
if (check_color) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4401068ff468..8c170db8495d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -200,7 +200,7 @@ struct i915_execbuffer {
struct i915_gem_context *ctx; /** context for building the request */
struct i915_address_space *vm; /** GTT and vma for the request */
- struct drm_i915_gem_request *request; /** our request to build */
+ struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -227,7 +227,7 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
} reloc_cache;
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
list_add_tail(&vma->exec_link, &eb->unbound);
if (drm_mm_node_allocated(&vma->node))
err = i915_vma_unbind(vma);
+ if (unlikely(err))
+ vma->exec_flags = NULL;
}
return err;
}
@@ -884,7 +886,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_add_request(cache->rq, true);
+ __i915_request_add(cache->rq, true);
cache->rq = NULL;
}
@@ -1068,12 +1070,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
{
struct reloc_cache *cache = &eb->reloc_cache;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
@@ -1101,13 +1103,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_gem_request_alloc(eb->engine, eb->ctx);
+ rq = i915_request_alloc(eb->engine, eb->ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
- err = i915_gem_request_await_object(rq, vma->obj, true);
+ err = i915_request_await_object(rq, vma->obj, true);
if (err)
goto err_request;
@@ -1139,7 +1141,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
return 0;
err_request:
- i915_add_request(rq);
+ i915_request_add(rq);
err_unpin:
i915_vma_unpin(batch);
err_unmap:
@@ -1725,7 +1727,7 @@ slow:
}
static void eb_export_fence(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags)
{
struct reservation_object *resv = vma->resv;
@@ -1737,9 +1739,9 @@ static void eb_export_fence(struct i915_vma *vma,
*/
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &req->fence);
+ reservation_object_add_excl_fence(resv, &rq->fence);
else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
+ reservation_object_add_shared_fence(resv, &rq->fence);
reservation_object_unlock(resv);
}
@@ -1755,7 +1757,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
struct drm_i915_gem_object *obj = vma->obj;
if (flags & EXEC_OBJECT_CAPTURE) {
- struct i915_gem_capture_list *capture;
+ struct i915_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
if (unlikely(!capture))
@@ -1786,7 +1788,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (flags & EXEC_OBJECT_ASYNC)
continue;
- err = i915_gem_request_await_object
+ err = i915_request_await_object
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
if (err)
return err;
@@ -1838,13 +1840,13 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = req->engine->id;
+ const unsigned int idx = rq->engine->id;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/*
@@ -1858,35 +1860,35 @@ void i915_vma_move_to_active(struct i915_vma *vma,
if (!i915_vma_is_active(vma))
obj->active_count++;
i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
+ i915_gem_active_set(&vma->last_read[idx], rq);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, req);
+ i915_gem_active_set(&obj->frontbuffer_write, rq);
- obj->base.read_domains = 0;
+ obj->read_domains = 0;
}
- obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, req);
+ i915_gem_active_set(&vma->last_fence, rq);
}
-static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
+static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
int i;
- if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
+ if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
- cs = intel_ring_begin(req, 4 * 2 + 2);
+ cs = intel_ring_begin(rq, 4 * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1896,7 +1898,7 @@ static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
*cs++ = 0;
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1942,10 +1944,10 @@ out:
}
static void
-add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
+add_to_client(struct i915_request *rq, struct drm_file *file)
{
- req->file_priv = file->driver_priv;
- list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
+ rq->file_priv = file->driver_priv;
+ list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}
static int eb_submit(struct i915_execbuffer *eb)
@@ -1973,7 +1975,7 @@ static int eb_submit(struct i915_execbuffer *eb)
return 0;
}
-/**
+/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
*/
@@ -2149,7 +2151,7 @@ await_fence_array(struct i915_execbuffer *eb,
if (!fence)
return -EINVAL;
- err = i915_gem_request_await_dma_fence(eb->request, fence);
+ err = i915_request_await_dma_fence(eb->request, fence);
dma_fence_put(fence);
if (err < 0)
return err;
@@ -2363,14 +2365,14 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(eb.reloc_cache.rq);
/* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
+ eb.request = i915_request_alloc(eb.engine, eb.ctx);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
goto err_batch_unpin;
}
if (in_fence) {
- err = i915_gem_request_await_dma_fence(eb.request, in_fence);
+ err = i915_request_await_dma_fence(eb.request, in_fence);
if (err < 0)
goto err_request;
}
@@ -2398,10 +2400,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
*/
eb.request->batch = eb.batch;
- trace_i915_gem_request_queue(eb.request, eb.batch_flags);
+ trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_add_request(eb.request, err == 0);
+ __i915_request_add(eb.request, err == 0);
add_to_client(eb.request, file);
if (fences)
@@ -2410,7 +2412,7 @@ err_request:
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
- args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+ args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
args->rsvd2 |= (u64)out_fence_fd << 32;
out_fence_fd = -1;
} else {
@@ -2463,8 +2465,8 @@ static bool check_buffer_count(size_t count)
* list array and passes it to the real function.
*/
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
@@ -2554,8 +2556,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index b8338d75c6f3..d548ac05ccd7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -64,7 +64,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_INFO(fence->i915)->gen >= 6) {
+ if (INTEL_GEN(fence->i915) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 955ce7bee448..21d72f695adb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -673,27 +673,22 @@ static void free_pd(struct i915_address_space *vm,
static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- unsigned int i;
-
fill_px(vm, pd,
gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
- for (i = 0; i < I915_PDES; i++)
- pd->page_table[i] = vm->scratch_pt;
+ memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
}
static int __pdp_init(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- unsigned int i;
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
- for (i = 0; i < pdpes; i++)
- pdp->page_directory[i] = vm->scratch_pd;
+ memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
return 0;
}
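The loops removed above were initialising every entry of a pointer array to the same scratch pointer; memset_p() from the kernel's string helpers does exactly that in one call. A rough standalone equivalent of what the helper does, using only standard C and a locally defined name:

#include <stddef.h>

/* Minimal sketch of a memset_p()-style helper: point all n slots of a
 * pointer array at the same object v. */
static void memset_p_sketch(void **p, void *v, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		p[i] = v;
}

Read this way, gen8_initialize_pd() simply points every PDE slot at the scratch page table, and __pdp_init() does the same for the page directories.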
@@ -715,7 +710,7 @@ alloc_pdp(struct i915_address_space *vm)
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!use_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -764,25 +759,22 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
static void gen8_initialize_pml4(struct i915_address_space *vm,
struct i915_pml4 *pml4)
{
- unsigned int i;
-
fill_px(vm, pml4,
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
- pml4->pdps[i] = vm->scratch_pdp;
+ memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct drm_i915_gem_request *req,
+static int gen8_write_pdp(struct i915_request *rq,
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
BUG_ON(entry >= 4);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -792,20 +784,20 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
*cs++ = lower_32_bits(addr);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
int i, ret;
for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- ret = gen8_write_pdp(req, i, pd_daddr);
+ ret = gen8_write_pdp(rq, i, pd_daddr);
if (ret)
return ret;
}
@@ -814,9 +806,9 @@ static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
}
static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+ return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
@@ -1740,13 +1732,13 @@ static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1756,19 +1748,19 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1778,16 +1770,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *dev_priv = rq->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
@@ -2109,7 +2101,7 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
ppgtt->base.i915 = dev_priv;
ppgtt->base.dma = &dev_priv->drm.pdev->dev;
- if (INTEL_INFO(dev_priv)->gen < 8)
+ if (INTEL_GEN(dev_priv) < 8)
return gen6_ppgtt_init(ppgtt);
else
return gen8_ppgtt_init(ppgtt);
@@ -2257,9 +2249,9 @@ void i915_ppgtt_release(struct kref *kref)
trace_i915_ppgtt_release(&ppgtt->base);
/* vmas should already be unbound and destroyed */
- WARN_ON(!list_empty(&ppgtt->base.active_list));
- WARN_ON(!list_empty(&ppgtt->base.inactive_list));
- WARN_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
ppgtt->base.cleanup(&ppgtt->base);
i915_address_space_fini(&ppgtt->base);
@@ -2822,10 +2814,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
i915->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+ GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
ggtt->base.bind_vma = aliasing_gtt_bind_vma;
- WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
+ GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2916,7 +2908,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt->base.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
- WARN_ON(!list_empty(&ggtt->base.active_list));
+ GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3809,6 +3801,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ /* fall through */
case I915_GGTT_VIEW_NORMAL:
vma->pages = vma->obj->mm.pages;
return 0;
@@ -3821,11 +3816,6 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
break;
-
- default:
- WARN_ONCE(1, "GGTT view %u not implemented!\n",
- vma->ggtt_view.type);
- return -EINVAL;
}
ret = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a42890d9af38..6efc017e8bb3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -39,7 +39,8 @@
#include <linux/pagevec.h>
#include "i915_gem_timeline.h"
-#include "i915_gem_request.h"
+
+#include "i915_request.h"
#include "i915_selftest.h"
#define I915_GTT_PAGE_SIZE_4K BIT(12)
@@ -398,7 +399,7 @@ struct i915_hw_ppgtt {
gen6_pte_t __iomem *pd_addr;
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req);
+ struct i915_request *rq);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index a1d6956734f7..0d0144b2104c 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -167,6 +167,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
};
/**
+ * i915_gem_object_create_internal: create an object with volatile pages
+ * @i915: the i915 device
+ * @size: the size in bytes of backing storage to allocate for the object
+ *
* Creates a new object that wraps some internal memory for private use.
* This object is not backed by swappable storage, and as such its contents
* are volatile and only valid whilst pinned. If the object is reaped by the
@@ -197,8 +201,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 05e89e1c0a08..54f00b350779 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,7 +33,7 @@
#include <drm/i915_drm.h>
-#include "i915_gem_request.h"
+#include "i915_request.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
@@ -148,6 +148,21 @@ struct drm_i915_gem_object {
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
+ /**
+ * @read_domains: Read memory domains.
+ *
+ * These monitor which caches contain read/write data related to the
+ * object. When transitioning from one set of domains to another,
+ * the driver is called to ensure that caches are suitably flushed and
+ * invalidated.
+ */
+ u16 read_domains;
+
+ /**
+ * @write_domain: Corresponding unique write memory domain.
+ */
+ u16 write_domain;
+
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
struct i915_gem_active frontbuffer_write;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index f7fc0df251ac..1036e8686916 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -177,7 +177,7 @@ err:
#undef OUT_BATCH
-int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
+int i915_gem_render_state_emit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_render_state so = {}; /* keep the compiler happy */
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 86369520482e..112cda8fa1a8 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -24,8 +24,8 @@
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
-struct drm_i915_gem_request;
+struct i915_request;
-int i915_gem_render_state_emit(struct drm_i915_gem_request *rq);
+int i915_gem_render_state_emit(struct i915_request *rq);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 0e158f9287c4..5757fb7c4b5a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -175,7 +175,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
trace_i915_gem_shrink(i915, target, flags);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
@@ -267,7 +267,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(i915);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
shrinker_unlock(i915, unlock);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d3f222fa6356..62aa67960bf4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -356,7 +356,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_base = 0;
reserved_size = 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 2:
case 3:
break;
@@ -516,7 +516,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index b5a22400a01f..33e01bf6aa36 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -27,9 +27,9 @@
#include <linux/list.h>
-#include "i915_utils.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
#include "i915_syncmap.h"
+#include "i915_utils.h"
struct i915_gem_timeline;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 382a77a1097e..d596a8302ca3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -721,7 +721,7 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.release = i915_gem_userptr_release,
};
-/**
+/*
* Creates a new mm object that wraps some normal memory from the process
* context - user memory.
*
@@ -757,7 +757,9 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
* dma-buf instead.
*/
int
-i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+i915_gem_userptr_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
@@ -796,8 +798,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 67c902412193..f89ac7a8f95f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -579,11 +579,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
- const struct intel_device_info *info)
+ const struct intel_device_info *info,
+ const struct intel_driver_caps *caps)
{
struct drm_printer p = i915_error_printer(m);
intel_device_info_dump_flags(info, &p);
+ intel_driver_caps_print(caps, &p);
+ intel_device_info_dump_topology(&info->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -808,7 +811,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
- err_print_capabilities(m, &error->device_info);
+ err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
@@ -989,7 +992,7 @@ out:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = __i915_gem_active_peek(active);
return request ? request->global_seqno : 0;
@@ -998,7 +1001,7 @@ __active_get_seqno(struct i915_gem_active *active)
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = __i915_gem_active_peek(active);
return request ? request->engine->id : -1;
@@ -1019,8 +1022,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
+ err->read_domains = obj->read_domains;
+ err->write_domain = obj->write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->mm.dirty;
@@ -1082,9 +1085,9 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code;
}
-static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void gem_record_fences(struct i915_gpu_state *error)
{
+ struct drm_i915_private *dev_priv = error->i915;
int i;
if (INTEL_GEN(dev_priv) >= 6) {
@@ -1100,27 +1103,6 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
error->nfence = i;
}
-static inline u32
-gen8_engine_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
-{
- int idx;
-
- /*
- * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
- * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
- * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
- * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
- * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
- */
-
- idx = (other - engine) - 1;
- if (idx < 0)
- idx += I915_NUM_ENGINES;
-
- return idx;
-}
-
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@@ -1291,7 +1273,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
}
-static void record_request(struct drm_i915_gem_request *request,
+static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
@@ -1308,10 +1290,10 @@ static void record_request(struct drm_i915_gem_request *request,
}
static void engine_record_requests(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *first,
+ struct i915_request *first,
struct drm_i915_error_engine *ee)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int count;
count = 0;
@@ -1361,7 +1343,7 @@ static void error_record_engine_execlists(struct intel_engine_cs *engine,
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+ struct i915_request *rq = port_request(&execlists->port[n]);
if (!rq)
break;
@@ -1396,10 +1378,10 @@ static void record_context(struct drm_i915_error_context *e,
e->active = atomic_read(&ctx->active_count);
}
-static void request_record_user_bo(struct drm_i915_gem_request *request,
+static void request_record_user_bo(struct i915_request *request,
struct drm_i915_error_engine *ee)
{
- struct i915_gem_capture_list *c;
+ struct i915_capture_list *c;
struct drm_i915_error_object **bo;
long count;
@@ -1443,16 +1425,16 @@ capture_object(struct drm_i915_private *dev_priv,
}
}
-static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void gem_record_rings(struct i915_gpu_state *error)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = error->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
int i;
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = dev_priv->engine[i];
+ struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
ee->engine_id = -1;
@@ -1479,17 +1461,16 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
* by userspace.
*/
ee->batchbuffer =
- i915_error_object_create(dev_priv,
- request->batch);
+ i915_error_object_create(i915, request->batch);
- if (HAS_BROKEN_CS_TLB(dev_priv))
+ if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
- i915_error_object_create(dev_priv,
+ i915_error_object_create(i915,
engine->scratch);
request_record_user_bo(request, ee);
ee->ctx =
- i915_error_object_create(dev_priv,
+ i915_error_object_create(i915,
request->ctx->engine[i].state);
error->simulated |=
@@ -1503,27 +1484,24 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
- i915_error_object_create(dev_priv, ring->vma);
+ i915_error_object_create(i915, ring->vma);
engine_record_requests(engine, request, ee);
}
ee->hws_page =
- i915_error_object_create(dev_priv,
+ i915_error_object_create(i915,
engine->status_page.vma);
- ee->wa_ctx =
- i915_error_object_create(dev_priv, engine->wa_ctx.vma);
+ ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
- ee->default_state =
- capture_object(dev_priv, engine->default_state);
+ ee->default_state = capture_object(i915, engine->default_state);
}
}
-static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error,
- struct i915_address_space *vm,
- int idx)
+static void gem_capture_vm(struct i915_gpu_state *error,
+ struct i915_address_space *vm,
+ int idx)
{
struct drm_i915_error_buffer *active_bo;
struct i915_vma *vma;
@@ -1546,8 +1524,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->active_bo_count[idx] = count;
}
-static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_active_buffers(struct i915_gpu_state *error)
{
int cnt = 0, i, j;
@@ -1567,14 +1544,13 @@ static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
for (j = 0; j < i && !found; j++)
found = error->engine[j].vm == ee->vm;
if (!found)
- i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
+ gem_capture_vm(error, ee->vm, cnt++);
}
}
-static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.base;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
@@ -1624,9 +1600,9 @@ static void capture_uc_state(struct i915_gpu_state *error)
}
/* Capture all registers which don't fit into another category. */
-static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_reg_state(struct i915_gpu_state *error)
{
+ struct drm_i915_private *dev_priv = error->i915;
int i;
/* General organization
@@ -1723,23 +1699,25 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
engine_mask ? "reset" : "continue");
}
-static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_gen_state(struct i915_gpu_state *error)
{
- error->awake = dev_priv->gt.awake;
- error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
- error->suspended = dev_priv->runtime_pm.suspended;
+ struct drm_i915_private *i915 = error->i915;
+
+ error->awake = i915->gt.awake;
+ error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
+ error->suspended = i915->runtime_pm.suspended;
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
error->iommu = intel_iommu_gfx_mapped;
#endif
- error->reset_count = i915_reset_count(&dev_priv->gpu_error);
- error->suspend_count = dev_priv->suspend_count;
+ error->reset_count = i915_reset_count(&i915->gpu_error);
+ error->suspend_count = i915->suspend_count;
memcpy(&error->device_info,
- INTEL_INFO(dev_priv),
+ INTEL_INFO(i915),
sizeof(error->device_info));
+ error->driver_caps = i915->caps;
}
static __always_inline void dup_param(const char *type, void *x)
@@ -1766,14 +1744,13 @@ static int capture(void *data)
error->i915->gt.last_init_time);
capture_params(error);
+ capture_gen_state(error);
capture_uc_state(error);
-
- i915_capture_gen_state(error->i915, error);
- i915_capture_reg_state(error->i915, error);
- i915_gem_record_fences(error->i915, error);
- i915_gem_record_rings(error->i915, error);
- i915_capture_active_buffers(error->i915, error);
- i915_capture_pinned_buffers(error->i915, error);
+ capture_reg_state(error);
+ gem_record_fences(error);
+ gem_record_rings(error);
+ capture_active_buffers(error);
+ capture_pinned_buffers(error);
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
@@ -1802,14 +1779,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
/**
* i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
+ * @i915: i915 device
+ * @engine_mask: the mask of engines triggering the hang
+ * @error_msg: a message to insert into the error capture header
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
+void i915_capture_error_state(struct drm_i915_private *i915,
u32 engine_mask,
const char *error_msg)
{
@@ -1820,25 +1799,25 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
if (!i915_modparams.error_capture)
return;
- if (READ_ONCE(dev_priv->gpu_error.first_error))
+ if (READ_ONCE(i915->gpu_error.first_error))
return;
- error = i915_capture_gpu_state(dev_priv);
+ error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
- i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
+ i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
if (!error->simulated) {
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (!dev_priv->gpu_error.first_error) {
- dev_priv->gpu_error.first_error = error;
+ spin_lock_irqsave(&i915->gpu_error.lock, flags);
+ if (!i915->gpu_error.first_error) {
+ i915->gpu_error.first_error = error;
error = NULL;
}
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
}
if (error) {
@@ -1853,7 +1832,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- dev_priv->drm.primary->index);
+ i915->drm.primary->index);
warned = true;
}
}
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 97f3a5640289..0e5c580d117c 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -1,11 +1,6 @@
-/**
- * \file i915_ioc32.c
- *
+/*
* 32-bit ioctl compatibility routines for the i915 DRM.
*
- * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
- *
- *
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Alan Hourihane 2005
* All Rights Reserved.
@@ -28,6 +23,8 @@
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
+ *
+ * Author: Alan Hourihane <alanh@fairlite.demon.co.uk>
*/
#include <linux/compat.h>
@@ -55,10 +52,10 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ __put_user(req32.param, &request->param) ||
+ __put_user((void __user *)(unsigned long)req32.value,
+ &request->value))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
@@ -70,13 +67,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
+ * i915_compat_ioctl - handle the mistakes of the past
+ * @filp: the file pointer
+ * @cmd: the ioctl command (and encoded flags)
+ * @arg: the ioctl argument (from userspace)
+ *
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
*/
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b886bd459acc..633c18785c1e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -415,6 +415,9 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
if (READ_ONCE(rps->interrupts_enabled))
return;
+ if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON_ONCE(rps->pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -431,6 +434,9 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
if (!READ_ONCE(rps->interrupts_enabled))
return;
+ if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
rps->interrupts_enabled = false;
@@ -1071,7 +1077,7 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq = NULL;
+ struct i915_request *rq = NULL;
struct intel_wait *wait;
if (!engine->breadcrumbs.irq_armed)
@@ -1098,13 +1104,13 @@ static void notify_ring(struct intel_engine_cs *engine)
*/
if (i915_seqno_passed(intel_engine_get_seqno(engine),
wait->seqno)) {
- struct drm_i915_gem_request *waiter = wait->request;
+ struct i915_request *waiter = wait->request;
wakeup = true;
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
- rq = i915_gem_request_get(waiter);
+ rq = i915_request_get(waiter);
}
if (wakeup)
@@ -1117,7 +1123,8 @@ static void notify_ring(struct intel_engine_cs *engine)
if (rq) {
dma_fence_signal(&rq->fence);
- i915_gem_request_put(rq);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_request_put(rq);
}
trace_intel_engine_notify(engine, wait);
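notify_ring() above decides whether the waiter's request has been reached by comparing breadcrumb seqnos with i915_seqno_passed(), which in this driver is essentially a wrap-safe signed comparison of two u32 values rather than a plain >=. A minimal sketch of that idiom:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "has seq1 caught up with seq2?" check in the style of
 * i915_seqno_passed(); assumes the two values are never more than
 * 2^31 apart. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

For example, seqno_passed(2, 0xfffffffeu) is true: seqno 2 is "after" 0xfffffffe once the counter has wrapped.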
@@ -1413,64 +1420,73 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
tasklet_hi_schedule(&execlists->tasklet);
}
-static void gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
+static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
+ void __iomem * const regs = i915->regs;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+ GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS1_IRQ | \
+ GEN8_GT_VCS2_IRQ | \
+ GEN8_GT_VECS_IRQ | \
+ GEN8_GT_PM_IRQ | \
+ GEN8_GT_GUC_IRQ)
+
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
- if (gt_iir[0])
- I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
+ gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(gt_iir[0]))
+ raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
- if (gt_iir[1])
- I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
+ gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(gt_iir[1]))
+ raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
}
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
- if (gt_iir[3])
- I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(gt_iir[2] & (i915->pm_rps_events |
+ i915->pm_guc_events)))
+ raw_reg_write(regs, GEN8_GT_IIR(2),
+ gt_iir[2] & (i915->pm_rps_events |
+ i915->pm_guc_events));
}
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & (dev_priv->pm_rps_events |
- dev_priv->pm_guc_events)) {
- I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & (dev_priv->pm_rps_events |
- dev_priv->pm_guc_events));
- }
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(gt_iir[3]))
+ raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
}
}
-static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir[4])
+static void gen8_gt_irq_handler(struct drm_i915_private *i915,
+ u32 master_ctl, u32 gt_iir[4])
{
- if (gt_iir[0]) {
- gen8_cs_irq_handler(dev_priv->engine[RCS],
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(dev_priv->engine[BCS],
+ gen8_cs_irq_handler(i915->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
- if (gt_iir[1]) {
- gen8_cs_irq_handler(dev_priv->engine[VCS],
+ if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(i915->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
- if (gt_iir[3])
- gen8_cs_irq_handler(dev_priv->engine[VECS],
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gen8_cs_irq_handler(i915->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+ }
- if (gt_iir[2] & dev_priv->pm_rps_events)
- gen6_rps_irq_handler(dev_priv, gt_iir[2]);
-
- if (gt_iir[2] & dev_priv->pm_guc_events)
- gen9_guc_irq_handler(dev_priv, gt_iir[2]);
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gen6_rps_irq_handler(i915, gt_iir[2]);
+ gen9_guc_irq_handler(i915, gt_iir[2]);
+ }
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -2085,9 +2101,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
do {
u32 master_ctl, iir;
- u32 gt_iir[4] = {};
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
+ u32 gt_iir[4];
u32 ier = 0;
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -2140,7 +2156,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- gen8_gt_irq_handler(dev_priv, gt_iir);
+ gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2675,10 +2691,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(arg);
u32 master_ctl;
- u32 gt_iir[4] = {};
+ u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -2690,18 +2705,19 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
- /* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
-
/* Find, clear, then process each source of interrupt */
gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
- gen8_gt_irq_handler(dev_priv, gt_iir);
- gen8_de_irq_handler(dev_priv, master_ctl);
+
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+ if (master_ctl & ~GEN8_GT_IRQS) {
+ disable_rpm_wakeref_asserts(dev_priv);
+ gen8_de_irq_handler(dev_priv, master_ctl);
+ enable_rpm_wakeref_asserts(dev_priv);
+ }
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ_FW(GEN8_MASTER_IRQ);
- enable_rpm_wakeref_asserts(dev_priv);
+ gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
return IRQ_HANDLED;
}
@@ -2746,6 +2762,156 @@ static void __fini_wedge(struct wedge_me *w)
(W)->i915; \
__fini_wedge((W)))
+static __always_inline void
+gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir)
+{
+ gen8_cs_irq_handler(engine, iir, 0);
+}
+
+static void
+gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int engine_n,
+ const u16 iir)
+{
+ struct intel_engine_cs ** const engine = i915->engine;
+
+ switch (bank) {
+ case 0:
+ switch (engine_n) {
+
+ case GEN11_RCS0:
+ return gen11_cs_irq_handler(engine[RCS], iir);
+
+ case GEN11_BCS:
+ return gen11_cs_irq_handler(engine[BCS], iir);
+ }
+ break;
+ case 1:
+ switch (engine_n) {
+
+ case GEN11_VCS(0):
+ return gen11_cs_irq_handler(engine[_VCS(0)], iir);
+ case GEN11_VCS(1):
+ return gen11_cs_irq_handler(engine[_VCS(1)], iir);
+ case GEN11_VCS(2):
+ return gen11_cs_irq_handler(engine[_VCS(2)], iir);
+ case GEN11_VCS(3):
+ return gen11_cs_irq_handler(engine[_VCS(3)], iir);
+
+ case GEN11_VECS(0):
+ return gen11_cs_irq_handler(engine[_VECS(0)], iir);
+ case GEN11_VECS(1):
+ return gen11_cs_irq_handler(engine[_VECS(1)], iir);
+ }
+ }
+}
+
+static u32
+gen11_gt_engine_intr(struct drm_i915_private * const i915,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = i915->regs;
+ u32 timeout_ts;
+ u32 ident;
+
+ raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+ } while (!(ident & GEN11_INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+ DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+ GEN11_INTR_DATA_VALID);
+
+ return ident & GEN11_INTR_ENGINE_MASK;
+}
+
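gen11_gt_engine_intr() above budgets roughly 100us for the identity register to report valid data: local_clock() returns nanoseconds, and shifting right by 10 approximates a nanosecond-to-microsecond conversion (dividing by 1024 instead of 1000). The same polling pattern in a self-contained form, with a hypothetical ready() callback standing in for the register read; 64-bit arithmetic is used here, so the wrap handling that time_after32() provides is not needed:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Poll ready() for roughly 100us, using the same >>10 ns-to-us shortcut. */
static bool poll_for_valid(bool (*ready)(void))
{
	uint64_t timeout_ts = (now_ns() >> 10) + 100;
	bool ok;

	do {
		ok = ready();
	} while (!ok && (now_ns() >> 10) <= timeout_ts);

	return ok;
}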
+static void
+gen11_gt_irq_handler(struct drm_i915_private * const i915,
+ const u32 master_ctl)
+{
+ void __iomem * const regs = i915->regs;
+ unsigned int bank;
+
+ for (bank = 0; bank < 2; bank++) {
+ unsigned long intr_dw;
+ unsigned int bit;
+
+ if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
+ continue;
+
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+ if (unlikely(!intr_dw)) {
+ DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
+ continue;
+ }
+
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
+
+ if (unlikely(!iir))
+ continue;
+
+ gen11_gt_engine_irq_handler(i915, bank, bit, iir);
+ }
+
+ /* Clear must be after shared has been served for engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+ }
+}
+
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+ struct drm_i915_private * const i915 = to_i915(arg);
+ void __iomem * const regs = i915->regs;
+ u32 master_ctl;
+
+ if (!intel_irqs_enabled(i915))
+ return IRQ_NONE;
+
+ master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+ master_ctl &= ~GEN11_MASTER_IRQ;
+ if (!master_ctl)
+ return IRQ_NONE;
+
+ /* Disable interrupts. */
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+ /* Find, clear, then process each source of interrupt. */
+ gen11_gt_irq_handler(i915, master_ctl);
+
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+ if (master_ctl & GEN11_DISPLAY_IRQ) {
+ const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+
+ disable_rpm_wakeref_asserts(i915);
+ /*
+ * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
+ * for the display related bits.
+ */
+ gen8_de_irq_handler(i915, disp_ctl);
+ enable_rpm_wakeref_asserts(i915);
+ }
+
+ /* Acknowledge and enable interrupts. */
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+
+ return IRQ_HANDLED;
+}
+
/**
* i915_reset_device - do process context error handling work
* @dev_priv: i915 device private
@@ -2951,6 +3117,12 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ /* Even though there is no DMC, frame counter can get stuck when
+ * PSR is active as no frames are generated.
+ */
+ if (HAS_PSR(dev_priv))
+ drm_vblank_restore(dev, pipe);
+
return 0;
}
@@ -2963,6 +3135,12 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ /* Even if there is no DMC, frame counter can get stuck when
+ * PSR is active as no frames are generated, so check only for PSR.
+ */
+ if (HAS_PSR(dev_priv))
+ drm_vblank_restore(dev, pipe);
+
return 0;
}
@@ -3159,6 +3337,42 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
+static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+{
+ /* Disable RCS, BCS, VCS and VECS class engines. */
+ I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
+
+ /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
+ I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
+ I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
+}
+
+static void gen11_irq_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
+
+ gen11_gt_irq_reset(dev_priv);
+
+ I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+
+ for_each_pipe(dev_priv, pipe)
+ if (intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
+ GEN3_IRQ_RESET(GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
+}
+
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
@@ -3656,6 +3870,41 @@ static int gen8_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+
+ BUILD_BUG_ON(irqs & 0xffff0000);
+
+ /* Enable RCS, BCS, VCS and VECS class interrupts. */
+ I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
+ I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
+
+ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+ I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
+ I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
+ I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
+ I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
+ I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
+
+ dev_priv->pm_imr = 0xffffffff; /* TODO */
+}
+
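The GEN11 enable and mask registers written above each appear to carry two 16-bit engine fields in one 32-bit register, which is why the enable value is duplicated into both halves as irqs << 16 | irqs and the masks are inverted per half; the BUILD_BUG_ON() guards against an irqs value that would spill out of the low 16 bits. A small sketch of that packing, with the individual interrupt bits chosen only for illustration:

#include <assert.h>
#include <stdint.h>

/* Hypothetical per-engine interrupt bits, assumed to fit in 16 bits. */
#define USER_INTERRUPT		(1u << 0)
#define CONTEXT_SWITCH		(1u << 8)

/* Duplicate the per-engine bits into both 16-bit halves of the register. */
static uint32_t pack_two_engines(uint16_t irqs)
{
	return ((uint32_t)irqs << 16) | irqs;
}

int main(void)
{
	uint16_t irqs = USER_INTERRUPT | CONTEXT_SWITCH;

	assert(pack_two_engines(irqs) == 0x01010101u);
	return 0;
}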
+static int gen11_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen11_gt_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(dev_priv);
+
+ I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+
+ I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
+
+ return 0;
+}
+
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4104,6 +4353,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ dev->driver->irq_handler = gen11_irq_handler;
+ dev->driver->irq_preinstall = gen11_irq_reset;
+ dev->driver->irq_postinstall = gen11_irq_postinstall;
+ dev->driver->irq_uninstall = gen11_irq_reset;
+ dev->driver->enable_vblank = gen8_enable_vblank;
+ dev->driver->disable_vblank = gen8_disable_vblank;
+ dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 42ff06fe54a3..792facdb6702 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
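The switch from strncpy() to strlcpy() here (and in i915_oa_cnl.c just below) matters because strncpy() does not NUL-terminate the destination when the source is at least as long as the given length, whereas strlcpy() is bounded by the destination size and always terminates. A small illustration, with a local stand-in for strlcpy() so the snippet builds with plain libc:

#include <string.h>

/* Local sketch of strlcpy(): copy at most size-1 bytes and always
 * NUL-terminate; return the length of src. */
static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

static void compare(void)
{
	char a[8], b[8];

	strncpy(a, "0123456789", sizeof(a));		/* a is left without a terminator */
	strlcpy_sketch(b, "0123456789", sizeof(b));	/* b == "0123456", terminated */
	(void)a;
	(void)b;
}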
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ff0ac3627cc4..ba9140c87cc0 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4e7a10c89782..062e91b39085 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -29,6 +29,9 @@
#include "i915_drv.h"
#include "i915_selftest.h"
+#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
+#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
+
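The two helpers added above fill in both the plain identifier and a corresponding bitmask: PLATFORM(x) sets .platform and .platform_mask, and GEN(x) sets .gen and .gen_mask = BIT(x - 1), so generation and platform checks can test a single bit rather than compare integers. A cut-down sketch of the expansion, with local stand-ins for the kernel types:

#include <stdint.h>

#define BIT(n)	(1u << (n))
#define GEN(x)	.gen = (x), .gen_mask = BIT((x) - 1)

/* Cut-down stand-in for struct intel_device_info. */
struct device_info_sketch {
	uint16_t gen_mask;
	uint8_t gen;
};

/* GEN(9) expands to .gen = 9, .gen_mask = BIT(8). */
static const struct device_info_sketch skl_sketch = {
	GEN(9),
};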
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
@@ -63,7 +66,8 @@
.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN2_FEATURES \
- .gen = 2, .num_pipes = 1, \
+ GEN(2), \
+ .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \
.has_gmch_display = 1, \
.hws_needs_physical = 1, \
@@ -76,19 +80,20 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
- .platform = INTEL_I830,
+ PLATFORM(INTEL_I830),
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_i845g_info = {
GEN2_FEATURES,
- .platform = INTEL_I845G,
+ PLATFORM(INTEL_I845G),
};
static const struct intel_device_info intel_i85x_info = {
GEN2_FEATURES,
- .platform = INTEL_I85X, .is_mobile = 1,
+ PLATFORM(INTEL_I85X),
+ .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
.cursor_needs_physical = 1,
.has_fbc = 1,
@@ -96,11 +101,12 @@ static const struct intel_device_info intel_i85x_info = {
static const struct intel_device_info intel_i865g_info = {
GEN2_FEATURES,
- .platform = INTEL_I865G,
+ PLATFORM(INTEL_I865G),
};
#define GEN3_FEATURES \
- .gen = 3, .num_pipes = 2, \
+ GEN(3), \
+ .num_pipes = 2, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
@@ -110,7 +116,8 @@ static const struct intel_device_info intel_i865g_info = {
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
- .platform = INTEL_I915G, .cursor_needs_physical = 1,
+ PLATFORM(INTEL_I915G),
+ .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
@@ -118,7 +125,7 @@ static const struct intel_device_info intel_i915g_info = {
static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
- .platform = INTEL_I915GM,
+ PLATFORM(INTEL_I915GM),
.is_mobile = 1,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -130,7 +137,7 @@ static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
- .platform = INTEL_I945G,
+ PLATFORM(INTEL_I945G),
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
@@ -139,7 +146,8 @@ static const struct intel_device_info intel_i945g_info = {
static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
- .platform = INTEL_I945GM, .is_mobile = 1,
+ PLATFORM(INTEL_I945GM),
+ .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
@@ -150,20 +158,22 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
- .platform = INTEL_G33,
+ PLATFORM(INTEL_G33),
.has_hotplug = 1,
.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
- .platform = INTEL_PINEVIEW, .is_mobile = 1,
+ PLATFORM(INTEL_PINEVIEW),
+ .is_mobile = 1,
.has_hotplug = 1,
.has_overlay = 1,
};
#define GEN4_FEATURES \
- .gen = 4, .num_pipes = 2, \
+ GEN(4), \
+ .num_pipes = 2, \
.has_hotplug = 1, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
@@ -174,7 +184,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
- .platform = INTEL_I965G,
+ PLATFORM(INTEL_I965G),
.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
@@ -182,7 +192,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
- .platform = INTEL_I965GM,
+ PLATFORM(INTEL_I965GM),
.is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
@@ -192,20 +202,21 @@ static const struct intel_device_info intel_i965gm_info = {
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
- .platform = INTEL_G45,
+ PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
- .platform = INTEL_GM45,
+ PLATFORM(INTEL_GM45),
.is_mobile = 1, .has_fbc = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
- .gen = 5, .num_pipes = 2, \
+ GEN(5), \
+ .num_pipes = 2, \
.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
@@ -217,17 +228,18 @@ static const struct intel_device_info intel_gm45_info = {
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
- .platform = INTEL_IRONLAKE,
+ PLATFORM(INTEL_IRONLAKE),
};
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
- .platform = INTEL_IRONLAKE,
+ PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1, .has_fbc = 1,
};
#define GEN6_FEATURES \
- .gen = 6, .num_pipes = 2, \
+ GEN(6), \
+ .num_pipes = 2, \
.has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
@@ -241,7 +253,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
- .platform = INTEL_SANDYBRIDGE
+ PLATFORM(INTEL_SANDYBRIDGE)
static const struct intel_device_info intel_sandybridge_d_gt1_info = {
SNB_D_PLATFORM,
@@ -255,7 +267,7 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = {
#define SNB_M_PLATFORM \
GEN6_FEATURES, \
- .platform = INTEL_SANDYBRIDGE, \
+ PLATFORM(INTEL_SANDYBRIDGE), \
.is_mobile = 1
@@ -270,7 +282,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
};
#define GEN7_FEATURES \
- .gen = 7, .num_pipes = 3, \
+ GEN(7), \
+ .num_pipes = 3, \
.has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
@@ -285,7 +298,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
- .platform = INTEL_IVYBRIDGE, \
+ PLATFORM(INTEL_IVYBRIDGE), \
.has_l3_dpf = 1
static const struct intel_device_info intel_ivybridge_d_gt1_info = {
@@ -300,7 +313,7 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = {
#define IVB_M_PLATFORM \
GEN7_FEATURES, \
- .platform = INTEL_IVYBRIDGE, \
+ PLATFORM(INTEL_IVYBRIDGE), \
.is_mobile = 1, \
.has_l3_dpf = 1
@@ -316,15 +329,15 @@ static const struct intel_device_info intel_ivybridge_m_gt2_info = {
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
- .platform = INTEL_IVYBRIDGE,
+ PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
static const struct intel_device_info intel_valleyview_info = {
- .platform = INTEL_VALLEYVIEW,
- .gen = 7,
+ PLATFORM(INTEL_VALLEYVIEW),
+ GEN(7),
.is_lp = 1,
.num_pipes = 2,
.has_psr = 1,
@@ -355,7 +368,7 @@ static const struct intel_device_info intel_valleyview_info = {
#define HSW_PLATFORM \
G75_FEATURES, \
- .platform = INTEL_HASWELL, \
+ PLATFORM(INTEL_HASWELL), \
.has_l3_dpf = 1
static const struct intel_device_info intel_haswell_gt1_info = {
@@ -375,6 +388,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
+ GEN(8), \
BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
@@ -385,8 +399,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define BDW_PLATFORM \
GEN8_FEATURES, \
- .gen = 8, \
- .platform = INTEL_BROADWELL
+ PLATFORM(INTEL_BROADWELL)
static const struct intel_device_info intel_broadwell_gt1_info = {
BDW_PLATFORM,
@@ -413,11 +426,12 @@ static const struct intel_device_info intel_broadwell_gt3_info = {
};
static const struct intel_device_info intel_cherryview_info = {
- .gen = 8, .num_pipes = 3,
+ PLATFORM(INTEL_CHERRYVIEW),
+ GEN(8),
+ .num_pipes = 3,
.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .platform = INTEL_CHERRYVIEW,
.has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
@@ -443,6 +457,7 @@ static const struct intel_device_info intel_cherryview_info = {
#define GEN9_FEATURES \
GEN8_FEATURES, \
+ GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
.has_csr = 1, \
@@ -452,8 +467,7 @@ static const struct intel_device_info intel_cherryview_info = {
#define SKL_PLATFORM \
GEN9_FEATURES, \
- .gen = 9, \
- .platform = INTEL_SKYLAKE
+ PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
SKL_PLATFORM,
@@ -481,7 +495,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
};
#define GEN9_LP_FEATURES \
- .gen = 9, \
+ GEN(9), \
.is_lp = 1, \
.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
@@ -513,21 +527,20 @@ static const struct intel_device_info intel_skylake_gt4_info = {
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
- .platform = INTEL_BROXTON,
+ PLATFORM(INTEL_BROXTON),
.ddb_size = 512,
};
static const struct intel_device_info intel_geminilake_info = {
GEN9_LP_FEATURES,
- .platform = INTEL_GEMINILAKE,
+ PLATFORM(INTEL_GEMINILAKE),
.ddb_size = 1024,
GLK_COLORS,
};
#define KBL_PLATFORM \
GEN9_FEATURES, \
- .gen = 9, \
- .platform = INTEL_KABYLAKE
+ PLATFORM(INTEL_KABYLAKE)
static const struct intel_device_info intel_kabylake_gt1_info = {
KBL_PLATFORM,
@@ -547,8 +560,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
#define CFL_PLATFORM \
GEN9_FEATURES, \
- .gen = 9, \
- .platform = INTEL_COFFEELAKE
+ PLATFORM(INTEL_COFFEELAKE)
static const struct intel_device_info intel_coffeelake_gt1_info = {
CFL_PLATFORM,
@@ -568,30 +580,33 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
#define GEN10_FEATURES \
GEN9_FEATURES, \
+ GEN(10), \
.ddb_size = 1024, \
GLK_COLORS
static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES,
- .is_alpha_support = 1,
- .platform = INTEL_CANNONLAKE,
- .gen = 10,
+ PLATFORM(INTEL_CANNONLAKE),
.gt = 2,
};
#define GEN11_FEATURES \
GEN10_FEATURES, \
- .gen = 11, \
+ GEN(11), \
.ddb_size = 2048, \
- .has_csr = 0
+ .has_csr = 0, \
+ .has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
- .platform = INTEL_ICELAKE,
+ PLATFORM(INTEL_ICELAKE),
.is_alpha_support = 1,
.has_resource_streamer = 0,
};
+#undef GEN
+#undef PLATFORM
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -650,6 +665,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
+ INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
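A minimal standalone sketch, not part of the patch, of the composition pattern the i915_pci.c hunks above move to: GEN() and PLATFORM() (defined earlier in that file and #undef'd after the table) expand to designated initializers, so each device-info entry layers a generation and platform tag on top of shared feature blocks. The macro bodies, struct fields and values below are illustrative assumptions, not the driver's actual definitions.

    #include <stdio.h>

    enum example_platform { PLATFORM_FOO, PLATFORM_BAR };

    struct device_info {
        enum example_platform platform;
        unsigned char gen;
        unsigned char gen_mask;
        unsigned int has_widget:1;      /* stand-in feature flag */
    };

    /* Illustrative equivalents of the driver's local helper macros. */
    #define PLATFORM(x)     .platform = (x)
    #define GEN(x)          .gen = (x), .gen_mask = 1 << ((x) - 1)

    #define COMMON_FEATURES \
        .has_widget = 1

    #define FOO_PLATFORM \
        COMMON_FEATURES, \
        GEN(8), \
        PLATFORM(PLATFORM_FOO)

    /* Each table entry is just the layered macros plus its own overrides. */
    static const struct device_info foo_info = {
        FOO_PLATFORM,
    };

    int main(void)
    {
        printf("platform=%d gen=%d gen_mask=0x%x has_widget=%d\n",
               (int)foo_info.platform, foo_info.gen,
               (unsigned int)foo_info.gen_mask, foo_info.has_widget);
        return 0;
    }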
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0be50e43507d..abaca6edeb71 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
*/
mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.exclusive_stream = NULL;
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
free_oa_buffer(dev_priv);
@@ -1630,10 +1629,10 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
* Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
* is only used by the kernel context.
*/
-static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
+static int gen8_emit_oa_config(struct i915_request *rq,
const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = req->i915;
+ struct drm_i915_private *dev_priv = rq->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -1647,7 +1646,7 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
u32 *cs;
int i;
- cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
+ cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1685,7 +1684,7 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1695,38 +1694,38 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct i915_gem_timeline *timeline;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- ret = gen8_emit_oa_config(req, oa_config);
+ ret = gen8_emit_oa_config(rq, oa_config);
if (ret) {
- i915_add_request(req);
+ i915_request_add(rq);
return ret;
}
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
GFP_KERNEL);
}
- i915_add_request(req);
+ i915_request_add(rq);
return 0;
}
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
* Note: it's only the RCS/Render context that has any OA state.
*/
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config,
- bool interruptible)
+ const struct i915_oa_config *oa_config)
{
struct i915_gem_context *ctx;
int ret;
unsigned int wait_flags = I915_WAIT_LOCKED;
- if (interruptible) {
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
- wait_flags |= I915_WAIT_INTERRUPTIBLE;
- } else {
- mutex_lock(&dev_priv->drm.struct_mutex);
- }
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Switch away from any user context. */
ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
}
out:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return ret;
}
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
+ ret = gen8_configure_all_contexts(dev_priv, oa_config);
if (ret)
return ret;
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL, false);
+ gen8_configure_all_contexts(dev_priv, NULL);
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL, false);
+ gen8_configure_all_contexts(dev_priv, NULL);
/* Make sure we disable noa to save power. */
I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_oa_buf_alloc;
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ goto err_lock;
+
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
stream->oa_config);
if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->ops = &i915_oa_stream_ops;
- /* Lock device for exclusive_stream access late because
- * enable_metric_set() might lock as well on gen8+.
- */
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
dev_priv->perf.oa.exclusive_stream = stream;
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
-err_lock:
+err_enable:
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
-err_enable:
+err_lock:
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
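The relabelling of the i915_oa_stream_init() error path above follows from moving the lock acquisition earlier: once the mutex is taken before enable_metric_set(), the cleanup labels have to unwind in the reverse order of setup. Below is a minimal standalone sketch of that goto-ladder convention, not part of the patch, with purely illustrative names.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int alloc_buffer(void **buf)
    {
        *buf = malloc(64);
        return *buf ? 0 : -1;
    }

    static int enable_metrics(void)
    {
        return 0;       /* pretend the hardware programming succeeded */
    }

    static int stream_init(void)
    {
        void *buf;
        int ret;

        ret = alloc_buffer(&buf);
        if (ret)
            return ret;

        ret = pthread_mutex_lock(&lock);        /* taken before "enable"... */
        if (ret)
            goto err_buf;

        ret = enable_metrics();
        if (ret)
            goto err_unlock;

        /* Success: publish state, then drop the lock. */
        pthread_mutex_unlock(&lock);
        return 0;

    err_unlock:                 /* ...so it is released before the buffer */
        pthread_mutex_unlock(&lock);
    err_buf:
        free(buf);
        return ret;
    }

    int main(void)
    {
        printf("stream_init() = %d\n", stream_init());
        return 0;
    }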
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 1c440460255d..964467b03e4d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -415,7 +415,94 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+ u64 val;
+
+ val = intel_rc6_residency_ns(i915,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+ unsigned long flags;
+ u64 val;
+
+ if (intel_runtime_pm_get_if_in_use(i915)) {
+ val = __get_rc6(i915);
+ intel_runtime_pm_put(i915);
+
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a larger value than returned
+ * be careful not to report a smaller value than returned
+ */
+
+ if (!locked)
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ if (!locked)
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ } else {
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+ unsigned long flags2;
+
+ /*
+ * We are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ if (!locked)
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ spin_lock_irqsave(&kdev->power.lock, flags2);
+
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_jiffies_last =
+ kdev->power.suspended_jiffies;
+
+ val = kdev->power.suspended_jiffies -
+ i915->pmu.suspended_jiffies_last;
+ val += jiffies - kdev->power.accounting_timestamp;
+
+ spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+ val = jiffies_to_nsecs(val);
+ val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ if (!locked)
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ }
+
+ return val;
+#else
+ return __get_rc6(i915);
+#endif
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
@@ -453,18 +540,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- intel_runtime_pm_get(i915);
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915,
- GEN6_GT_GFX_RC6p);
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915,
- GEN6_GT_GFX_RC6pp);
- intel_runtime_pm_put(i915);
+ val = get_rc6(i915, locked);
break;
}
}
@@ -479,7 +555,7 @@ static void i915_pmu_event_read(struct perf_event *event)
again:
prev = local64_read(&hwc->prev_count);
- new = __i915_pmu_event_read(event);
+ new = __i915_pmu_event_read(event, false);
if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
goto again;
@@ -534,7 +610,7 @@ static void i915_pmu_enable(struct perf_event *event)
* for all listeners. Even when the event was already enabled and has
* an existing non-zero value.
*/
- local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
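The get_rc6() hunks above estimate RC6 residency while the device is runtime suspended and clamp the next real readout so the value reported to perf never moves backwards. A minimal standalone sketch of that clamp follows; it is not part of the patch, and the names and numbers are illustrative rather than the driver's actual state layout.

    #include <stdint.h>
    #include <stdio.h>

    struct rc6_state {
        uint64_t last_real;     /* last value actually read from hardware */
        uint64_t estimated;     /* last value reported while estimating */
    };

    /* Device awake: fold in a fresh hardware read, never going backwards. */
    static uint64_t rc6_report_real(struct rc6_state *s, uint64_t hw_val)
    {
        if (hw_val >= s->estimated) {
            s->estimated = 0;           /* estimate no longer needed */
            s->last_real = hw_val;
            return hw_val;
        }
        return s->estimated;            /* clamp to what was already reported */
    }

    /* Device suspended: report last real value plus time spent suspended. */
    static uint64_t rc6_report_estimated(struct rc6_state *s, uint64_t suspended_ns)
    {
        s->estimated = s->last_real + suspended_ns;
        return s->estimated;
    }

    int main(void)
    {
        struct rc6_state s = { 0 };

        printf("%llu\n", (unsigned long long)rc6_report_real(&s, 1000));
        printf("%llu\n", (unsigned long long)rc6_report_estimated(&s, 500));
        /* Hardware says 1200, but 1500 was already reported: stay at 1500. */
        printf("%llu\n", (unsigned long long)rc6_report_real(&s, 1200));
        return 0;
    }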
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 5a2e013a56bb..aa1b1a987ea1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -27,6 +27,8 @@
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
+ __I915_SAMPLE_RC6,
+ __I915_SAMPLE_RC6_ESTIMATED,
__I915_NUM_PMU_SAMPLERS
};
@@ -95,6 +97,10 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
+ * @suspended_jiffies_last: Cached suspend time from PM core.
+ */
+ unsigned long suspended_jiffies_last;
+ /**
* @i915_attr: Memory block holding device attributes.
*/
void *i915_attr;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
new file mode 100644
index 000000000000..3ace929dd90f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_query.h"
+#include <uapi/drm/i915_drm.h>
+
+static int query_topology_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ struct drm_i915_query_topology_info topo;
+ u32 slice_length, subslice_length, eu_length, total_length;
+
+ if (query_item->flags != 0)
+ return -EINVAL;
+
+ if (sseu->max_slices == 0)
+ return -ENODEV;
+
+ BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
+
+ slice_length = sizeof(sseu->slice_mask);
+ subslice_length = sseu->max_slices *
+ DIV_ROUND_UP(sseu->max_subslices,
+ sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+ eu_length = sseu->max_slices * sseu->max_subslices *
+ DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+
+ total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
+
+ if (query_item->length == 0)
+ return total_length;
+
+ if (query_item->length < total_length)
+ return -EINVAL;
+
+ if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
+ sizeof(topo)))
+ return -EFAULT;
+
+ if (topo.flags != 0)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
+ total_length))
+ return -EFAULT;
+
+ memset(&topo, 0, sizeof(topo));
+ topo.max_slices = sseu->max_slices;
+ topo.max_subslices = sseu->max_subslices;
+ topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
+
+ topo.subslice_offset = slice_length;
+ topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
+ topo.eu_offset = slice_length + subslice_length;
+ topo.eu_stride =
+ DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ &topo, sizeof(topo)))
+ return -EFAULT;
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+ &sseu->slice_mask, slice_length))
+ return -EFAULT;
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) + slice_length),
+ sseu->subslice_mask, subslice_length))
+ return -EFAULT;
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) +
+ slice_length + subslice_length),
+ sseu->eu_mask, eu_length))
+ return -EFAULT;
+
+ return total_length;
+}
+
+static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item) = {
+ query_topology_info,
+};
+
+int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_query *args = data;
+ struct drm_i915_query_item __user *user_item_ptr =
+ u64_to_user_ptr(args->items_ptr);
+ u32 i;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ for (i = 0; i < args->num_items; i++, user_item_ptr++) {
+ struct drm_i915_query_item item;
+ u64 func_idx;
+ int ret;
+
+ if (copy_from_user(&item, user_item_ptr, sizeof(item)))
+ return -EFAULT;
+
+ if (item.query_id == 0)
+ return -EINVAL;
+
+ func_idx = item.query_id - 1;
+
+ if (func_idx < ARRAY_SIZE(i915_query_funcs))
+ ret = i915_query_funcs[func_idx](dev_priv, &item);
+ else
+ ret = -EINVAL;
+
+ /* Only write the length back to userspace if they differ. */
+ if (ret != item.length && put_user(ret, &user_item_ptr->length))
+ return -EFAULT;
+ }
+
+ return 0;
+}
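query_topology_info() above follows a two-pass convention: a query item with length == 0 returns the required buffer size, a too-small buffer returns -EINVAL, and an adequate buffer is filled with the size returned. A minimal standalone sketch of that calling convention follows; query_blob() is a purely illustrative stand-in for the ioctl and its data layout, not part of the patch.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the kernel side: report the size, or fill the buffer. */
    static int query_blob(void *buf, int length)
    {
        static const char payload[] = "slice/subslice/EU masks would go here";
        const int total = (int)sizeof(payload);

        if (length == 0)
            return total;               /* size probe */
        if (length < total)
            return -EINVAL;             /* caller's buffer is too small */
        memcpy(buf, payload, (size_t)total);
        return total;
    }

    int main(void)
    {
        int len = query_blob(NULL, 0);  /* first pass: ask for the size */
        char *buf;

        if (len <= 0)
            return 1;

        buf = malloc((size_t)len);
        if (!buf)
            return 1;

        if (query_blob(buf, len) != len) {      /* second pass: fetch data */
            free(buf);
            return 1;
        }

        printf("%s\n", buf);
        free(buf);
        return 0;
    }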
diff --git a/drivers/gpu/drm/i915/i915_query.h b/drivers/gpu/drm/i915/i915_query.h
new file mode 100644
index 000000000000..31dcef181f63
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_query.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_QUERY_H_
+#define _I915_QUERY_H_
+
+struct drm_device;
+struct drm_file;
+
+int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b576b6ba32a4..e6a8c0ee7df1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -178,6 +178,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define BCS_HW 2
#define VECS_HW 3
#define VCS2_HW 4
+#define VCS3_HW 6
+#define VCS4_HW 7
+#define VECS2_HW 12
/* Engine class */
@@ -188,7 +191,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OTHER_CLASS 4
#define MAX_ENGINE_CLASS 4
-#define MAX_ENGINE_INSTANCE 1
+#define MAX_ENGINE_INSTANCE 3
/* PCI config space */
@@ -1906,6 +1909,11 @@ enum i915_power_well_id {
#define CL_POWER_DOWN_ENABLE (1 << 4)
#define SUS_CLOCK_CONFIG (3 << 0)
+#define _ICL_PORT_CL_DW5_A 0x162014
+#define _ICL_PORT_CL_DW5_B 0x6C014
+#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
+ _ICL_PORT_CL_DW5_B)
+
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -2029,7 +2037,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
#define _CNL_PORT_TX_DW5_LN0_B 0x162654
#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D 0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D 0x162E54
#define _CNL_PORT_TX_DW5_LN0_F 0x162854
#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW5_GRP_AE, \
@@ -2060,7 +2068,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D 0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D 0x162E5C
#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW7_GRP_AE, \
@@ -2104,6 +2112,28 @@ enum i915_power_well_id {
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
+#define _ICL_PORT_COMP_DW0_A 0x162100
+#define _ICL_PORT_COMP_DW0_B 0x6C100
+#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
+ _ICL_PORT_COMP_DW0_B)
+#define _ICL_PORT_COMP_DW1_A 0x162104
+#define _ICL_PORT_COMP_DW1_B 0x6C104
+#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
+ _ICL_PORT_COMP_DW1_B)
+#define _ICL_PORT_COMP_DW3_A 0x16210C
+#define _ICL_PORT_COMP_DW3_B 0x6C10C
+#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
+ _ICL_PORT_COMP_DW3_B)
+#define _ICL_PORT_COMP_DW9_A 0x162124
+#define _ICL_PORT_COMP_DW9_B 0x6C124
+#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
+ _ICL_PORT_COMP_DW9_B)
+#define _ICL_PORT_COMP_DW10_A 0x162128
+#define _ICL_PORT_COMP_DW10_B 0x6C128
+#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
+ _ICL_PORT_COMP_DW10_A, \
+ _ICL_PORT_COMP_DW10_B)
+
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2315,7 +2345,13 @@ enum i915_power_well_id {
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
#define GEN8_BSD2_RING_BASE 0x1c000
+#define GEN11_BSD_RING_BASE 0x1c0000
+#define GEN11_BSD2_RING_BASE 0x1c4000
+#define GEN11_BSD3_RING_BASE 0x1d0000
+#define GEN11_BSD4_RING_BASE 0x1d4000
#define VEBOX_RING_BASE 0x1a000
+#define GEN11_VEBOX_RING_BASE 0x1c8000
+#define GEN11_VEBOX2_RING_BASE 0x1d8000
#define BLT_RING_BASE 0x22000
#define RING_TAIL(base) _MMIO((base)+0x30)
#define RING_HEAD(base) _MMIO((base)+0x34)
@@ -2780,6 +2816,13 @@ enum i915_power_well_id {
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
/* Fuse readout registers for GT */
+#define HSW_PAVP_FUSE1 _MMIO(0x911C)
+#define HSW_F1_EU_DIS_SHIFT 16
+#define HSW_F1_EU_DIS_MASK (0x3 << HSW_F1_EU_DIS_SHIFT)
+#define HSW_F1_EU_DIS_10EUS 0
+#define HSW_F1_EU_DIS_8EUS 1
+#define HSW_F1_EU_DIS_6EUS 2
+
#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
@@ -3869,6 +3912,12 @@ enum {
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
+#define GEN11_SW_CTX_ID_SHIFT 37
+#define GEN11_SW_CTX_ID_WIDTH 11
+#define GEN11_ENGINE_CLASS_SHIFT 61
+#define GEN11_ENGINE_CLASS_WIDTH 3
+#define GEN11_ENGINE_INSTANCE_SHIFT 48
+#define GEN11_ENGINE_INSTANCE_WIDTH 6
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -3916,6 +3965,9 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
+#define GWUNIT_CLKGATE_DIS (1 << 16)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS (1 << 20)
@@ -5320,8 +5372,8 @@ enum {
#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520)
#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524)
-#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
-#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
+#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
#define DP_AUX_CH_CTL_DONE (1 << 30)
@@ -7160,6 +7212,8 @@ enum {
#define DISP_DATA_PARTITION_5_6 (1<<6)
#define DISP_IPC_ENABLE (1<<3)
#define DBUF_CTL _MMIO(0x45008)
+#define DBUF_CTL_S1 _MMIO(0x45008)
+#define DBUF_CTL_S2 _MMIO(0x44FE8)
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
#define GEN7_MSG_CTL _MMIO(0x45010)
@@ -7169,8 +7223,9 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
-#define MASK_WAKEMEM (1<<13)
+#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
+#define MASK_WAKEMEM (1 << 13)
+#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
@@ -7182,8 +7237,12 @@ enum {
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define SKL_DSSM _MMIO(0x51004)
-#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
+#define SKL_DSSM _MMIO(0x51004)
+#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
@@ -7863,8 +7922,8 @@ enum {
#define _PCH_DPD_AUX_CH_DATA4 0xe4320
#define _PCH_DPD_AUX_CH_DATA5 0xe4324
-#define PCH_DP_AUX_CH_CTL(port) _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
-#define PCH_DP_AUX_CH_DATA(port, i) _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define PCH_DP_AUX_CH_CTL(aux_ch) _MMIO_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
+#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
#define PORT_TRANS_A_SEL_CPT 0
@@ -7964,9 +8023,13 @@ enum {
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
+#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
+#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188)
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
+#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4)
+#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4)
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
#define FORCEWAKE_KERNEL BIT(0)
@@ -8816,20 +8879,21 @@ enum skl_power_gate {
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK (3<<26)
-#define CDCLK_FREQ_450_432 (0<<26)
-#define CDCLK_FREQ_540 (1<<26)
-#define CDCLK_FREQ_337_308 (2<<26)
-#define CDCLK_FREQ_675_617 (3<<26)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
-#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
-#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
+#define CDCLK_FREQ_SEL_MASK (3 << 26)
+#define CDCLK_FREQ_450_432 (0 << 26)
+#define CDCLK_FREQ_540 (1 << 26)
+#define CDCLK_FREQ_337_308 (2 << 26)
+#define CDCLK_FREQ_675_617 (3 << 26)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
-#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */
@@ -9738,4 +9802,10 @@ enum skl_power_gate {
#define MMCD_PCLA (1 << 31)
#define MMCD_HOTSPOT_EN (1 << 27)
+#define _ICL_PHY_MISC_A 0x64C00
+#define _ICL_PHY_MISC_B 0x64C04
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
+ _ICL_PHY_MISC_B)
+#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+
#endif /* _I915_REG_H_ */
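Among the i915_reg.h additions above are GEN11_*_SHIFT/WIDTH pairs for the software context ID, engine class and engine instance fields of the gen11 context descriptor. Below is a minimal standalone sketch, not part of the patch, of how such shift/width pairs are typically packed into a 64-bit value; the helper and the chosen field values are illustrative, not the driver's descriptor-building code.

    #include <stdint.h>
    #include <stdio.h>

    #define GEN11_SW_CTX_ID_SHIFT           37
    #define GEN11_SW_CTX_ID_WIDTH           11
    #define GEN11_ENGINE_INSTANCE_SHIFT     48
    #define GEN11_ENGINE_INSTANCE_WIDTH     6
    #define GEN11_ENGINE_CLASS_SHIFT        61
    #define GEN11_ENGINE_CLASS_WIDTH        3

    /* Mask a value to the field width and move it to the field position. */
    static uint64_t pack_field(uint64_t val, unsigned int shift, unsigned int width)
    {
        uint64_t mask = (1ULL << width) - 1;

        return (val & mask) << shift;
    }

    int main(void)
    {
        uint64_t desc = 0;

        desc |= pack_field(42, GEN11_SW_CTX_ID_SHIFT, GEN11_SW_CTX_ID_WIDTH);
        desc |= pack_field(2, GEN11_ENGINE_INSTANCE_SHIFT,
                           GEN11_ENGINE_INSTANCE_WIDTH);
        desc |= pack_field(1, GEN11_ENGINE_CLASS_SHIFT, GEN11_ENGINE_CLASS_WIDTH);

        printf("descriptor = 0x%016llx\n", (unsigned long long)desc);
        return 0;
    }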
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8efa9e7a9e46..d437beac3969 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -37,7 +37,8 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* The timeline struct (as part of the ppgtt underneath a context)
+ /*
+ * The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
* We could extend the life of a context to beyond that of all
* fences, possibly keeping the hw resource around indefinitely,
@@ -53,7 +54,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
static bool i915_fence_signaled(struct dma_fence *fence)
{
- return i915_gem_request_completed(to_request(fence));
+ return i915_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
@@ -69,22 +70,23 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_wait_request(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
- struct drm_i915_gem_request *req = to_request(fence);
+ struct i915_request *rq = to_request(fence);
- /* The request is put onto a RCU freelist (i.e. the address
+ /*
+ * The request is put onto a RCU freelist (i.e. the address
* is immediately reused), mark the fences as being freed now.
* Otherwise the debugobjects for the fences are only marked as
* freed when the slab cache itself is freed, and so we would get
* caught trying to reuse dead objects.
*/
- i915_sw_fence_fini(&req->submit);
+ i915_sw_fence_fini(&rq->submit);
- kmem_cache_free(req->i915->requests, req);
+ kmem_cache_free(rq->i915->requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -97,7 +99,7 @@ const struct dma_fence_ops i915_fence_ops = {
};
static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+i915_request_remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
@@ -215,9 +217,9 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
struct intel_timeline *tl = engine->timeline;
if (!i915_seqno_passed(seqno, tl->seqno)) {
- /* spin until threads are complete */
- while (intel_breadcrumbs_busy(engine))
- cond_resched();
+ /* Flush any waiters before we reuse the seqno */
+ intel_engine_disarm_breadcrumbs(engine);
+ GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
}
/* Check we are idle before we fiddle with hw state! */
@@ -238,17 +240,15 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
- /* HWS page needs to be set less than what we
- * will inject to ring
- */
- return reset_all_global_seqno(dev_priv, seqno - 1);
+ /* HWS page needs to be set less than what we will inject to ring */
+ return reset_all_global_seqno(i915, seqno - 1);
}
static void mark_busy(struct drm_i915_private *i915)
@@ -331,16 +331,17 @@ static void unreserve_engine(struct intel_engine_cs *engine)
}
void i915_gem_retire_noop(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/* Space left intentionally blank */
}
-static void advance_ring(struct drm_i915_gem_request *request)
+static void advance_ring(struct i915_request *request)
{
unsigned int tail;
- /* We know the GPU must have read the request to have
+ /*
+ * We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
@@ -349,7 +350,8 @@ static void advance_ring(struct drm_i915_gem_request *request)
* completion order.
*/
if (list_is_last(&request->ring_link, &request->ring->request_list)) {
- /* We may race here with execlists resubmitting this request
+ /*
+ * We may race here with execlists resubmitting this request
* as we retire it. The resubmission will move the ring->tail
* forwards (to request->wa_tail). We either read the
* current value that was written to hw, or the value that
@@ -365,30 +367,30 @@ static void advance_ring(struct drm_i915_gem_request *request)
request->ring->head = tail;
}
-static void free_capture_list(struct drm_i915_gem_request *request)
+static void free_capture_list(struct i915_request *request)
{
- struct i915_gem_capture_list *capture;
+ struct i915_capture_list *capture;
capture = request->capture_list;
while (capture) {
- struct i915_gem_capture_list *next = capture->next;
+ struct i915_capture_list *next = capture->next;
kfree(capture);
capture = next;
}
}
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+static void i915_request_retire(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
- GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!i915_request_completed(request));
GEM_BUG_ON(!request->i915->gt.active_requests);
- trace_i915_gem_request_retire(request);
+ trace_i915_request_retire(request);
spin_lock_irq(&engine->timeline->lock);
list_del_init(&request->link);
@@ -399,7 +401,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
free_capture_list(request);
- /* Walk through the active list, calling retire on each. This allows
+ /*
+ * Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
* when their *last* active request is completed (updating state
* tracking lists for eviction, active references for GEM, etc).
@@ -409,7 +412,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
* the node after the callback).
*/
list_for_each_entry_safe(active, next, &request->active_list, link) {
- /* In microbenchmarks or focusing upon time inside the kernel,
+ /*
+ * In microbenchmarks or focusing upon time inside the kernel,
* we may spend an inordinate amount of time simply handling
* the retirement of requests and processing their callbacks.
* Of which, this loop itself is particularly hot due to the
@@ -426,15 +430,16 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
active->retire(active, request);
}
- i915_gem_request_remove_from_client(request);
+ i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
atomic_dec_if_positive(&request->ctx->ban_score);
- /* The backing object for the context is done after switching to the
+ /*
+ * The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. However, since we
- * cannot take the required locks at i915_gem_request_submit() we
+ * cannot take the required locks at i915_request_submit() we
* defer the unpinning of the active context to now, retirement of
* the subsequent request.
*/
@@ -443,35 +448,37 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
engine->last_retired_context = request->ctx;
spin_lock_irq(&request->lock);
- if (request->waitboost)
- atomic_dec(&request->i915->gt_pm.rps.num_waiters);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
dma_fence_signal_locked(&request->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
intel_engine_cancel_signaling(request);
+ if (request->waitboost) {
+ GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
+ atomic_dec(&request->i915->gt_pm.rps.num_waiters);
+ }
spin_unlock_irq(&request->lock);
i915_priotree_fini(request->i915, &request->priotree);
- i915_gem_request_put(request);
+ i915_request_put(request);
}
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_gem_request *tmp;
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_request *tmp;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_gem_request_completed(req));
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&req->link))
+ if (list_empty(&rq->link))
return;
do {
tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
- i915_gem_request_retire(tmp);
- } while (tmp != req);
+ i915_request_retire(tmp);
+ } while (tmp != rq);
}
static u32 timeline_get_seqno(struct intel_timeline *tl)
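i915_request_retire_upto() above relies on requests being kept in submission order on the engine timeline, so retiring "up to" a request is just popping completed entries from the head of a FIFO until that request has been processed. A minimal standalone sketch of the walk follows; it is not part of the patch, and the singly linked list stands in for the driver's structures.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        unsigned int seqno;
        struct request *next;
    };

    static struct request *queue_head;      /* oldest request first */

    static struct request *submit(unsigned int seqno)
    {
        struct request *rq = calloc(1, sizeof(*rq));
        struct request **p = &queue_head;

        if (!rq) {
            perror("calloc");
            exit(1);
        }
        rq->seqno = seqno;
        while (*p)
            p = &(*p)->next;
        *p = rq;                            /* append: FIFO order */
        return rq;
    }

    static void retire(struct request *rq)
    {
        printf("retiring request %u\n", rq->seqno);
        free(rq);
    }

    /* Retire from the head of the queue until @last (inclusive) is retired. */
    static void retire_upto(struct request *last)
    {
        bool was_last;

        do {
            struct request *rq = queue_head;

            queue_head = rq->next;
            was_last = (rq == last);
            retire(rq);
        } while (!was_last);
    }

    int main(void)
    {
        struct request *second;

        submit(1);
        second = submit(2);
        submit(3);

        retire_upto(second);                /* retires 1 and 2, leaves 3 */
        printf("head is now request %u\n", queue_head->seqno);
        return 0;
    }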
@@ -479,7 +486,7 @@ static u32 timeline_get_seqno(struct intel_timeline *tl)
return ++tl->seqno;
}
-void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
@@ -488,8 +495,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
- trace_i915_gem_request_execute(request);
-
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
@@ -513,10 +518,12 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
+ trace_i915_request_execute(request);
+
wake_up_all(&request->execute);
}
-void i915_gem_request_submit(struct drm_i915_gem_request *request)
+void i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
@@ -524,12 +531,12 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- __i915_gem_request_submit(request);
+ __i915_request_submit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
@@ -537,7 +544,8 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
- /* Only unwind in reverse order, required so that the per-context list
+ /*
+ * Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
GEM_BUG_ON(!request->global_seqno);
@@ -561,15 +569,16 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
list_move(&request->link, &timeline->requests);
spin_unlock(&timeline->lock);
- /* We don't need to wake_up any waiters on request->execute, they
+ /*
+ * We don't need to wake_up any waiters on request->execute, they
* will get woken by any other event or us re-adding this request
- * to the engine timeline (__i915_gem_request_submit()). The waiters
+ * to the engine timeline (__i915_request_submit()). The waiters
* should be quite adept at finding that the request now has a new
* global_seqno compared to the one they went to sleep on.
*/
}
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
@@ -577,7 +586,7 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- __i915_gem_request_unsubmit(request);
+ __i915_request_unsubmit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
@@ -585,18 +594,19 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct drm_i915_gem_request *request =
+ struct i915_request *request =
container_of(fence, typeof(*request), submit);
switch (state) {
case FENCE_COMPLETE:
- trace_i915_gem_request_submit(request);
+ trace_i915_request_submit(request);
/*
- * We need to serialize use of the submit_request() callback with its
- * hotplugging performed during an emergency i915_gem_set_wedged().
- * We use the RCU mechanism to mark the critical section in order to
- * force i915_gem_set_wedged() to wait until the submit_request() is
- * completed before proceeding.
+ * We need to serialize use of the submit_request() callback
+ * with its hotplugging performed during an emergency
+ * i915_gem_set_wedged(). We use the RCU mechanism to mark the
+ * critical section in order to force i915_gem_set_wedged() to
+ * wait until the submit_request() is completed before
+ * proceeding.
*/
rcu_read_lock();
request->engine->submit_request(request);
@@ -604,7 +614,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
break;
case FENCE_FREE:
- i915_gem_request_put(request);
+ i915_request_put(request);
break;
}
@@ -612,7 +622,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
}
/**
- * i915_gem_request_alloc - allocate a request structure
+ * i915_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
@@ -620,31 +630,32 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+struct i915_request *
+i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_request *req;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_request *rq;
struct intel_ring *ring;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/*
* Preempt contexts are reserved for exclusive use to inject a
* preemption context switch. They are never to be used for any trivial
* request!
*/
- GEM_BUG_ON(ctx == dev_priv->preempt_context);
+ GEM_BUG_ON(ctx == i915->preempt_context);
- /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(&i915->gpu_error))
return ERR_PTR(-EIO);
- /* Pinning the contexts may generate requests in order to acquire
+ /*
+ * Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
@@ -662,12 +673,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
goto err_unreserve;
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->timeline->requests,
- typeof(*req), link);
- if (req && i915_gem_request_completed(req))
- i915_gem_request_retire(req);
+ rq = list_first_entry_or_null(&engine->timeline->requests,
+ typeof(*rq), link);
+ if (rq && i915_request_completed(rq))
+ i915_request_retire(rq);
- /* Beware: Dragons be flying overhead.
+ /*
+ * Beware: Dragons be flying overhead.
*
* We use RCU to look up requests in flight. The lookups may
* race with the request being allocated from the slab freelist.
@@ -695,11 +707,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
*
* Do not use kmem_cache_zalloc() here!
*/
- req = kmem_cache_alloc(dev_priv->requests,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (unlikely(!req)) {
+ rq = kmem_cache_alloc(i915->requests,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (unlikely(!rq)) {
/* Ratelimit ourselves to prevent oom from malicious clients */
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
I915_WAIT_INTERRUPTIBLE);
if (ret)
@@ -713,55 +725,55 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* Having already penalized the client to stall, we spend
* a little extra time to re-optimise page allocation.
*/
- kmem_cache_shrink(dev_priv->requests);
+ kmem_cache_shrink(i915->requests);
rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
- req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req) {
+ rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+ if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
- GEM_BUG_ON(req->timeline == engine->timeline);
+ rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(rq->timeline == engine->timeline);
- spin_lock_init(&req->lock);
- dma_fence_init(&req->fence,
+ spin_lock_init(&rq->lock);
+ dma_fence_init(&rq->fence,
&i915_fence_ops,
- &req->lock,
- req->timeline->fence_context,
- timeline_get_seqno(req->timeline));
+ &rq->lock,
+ rq->timeline->fence_context,
+ timeline_get_seqno(rq->timeline));
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
- init_waitqueue_head(&req->execute);
+ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+ init_waitqueue_head(&rq->execute);
- i915_priotree_init(&req->priotree);
+ i915_priotree_init(&rq->priotree);
- INIT_LIST_HEAD(&req->active_list);
- req->i915 = dev_priv;
- req->engine = engine;
- req->ctx = ctx;
- req->ring = ring;
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->i915 = i915;
+ rq->engine = engine;
+ rq->ctx = ctx;
+ rq->ring = ring;
/* No zalloc, must clear what we need by hand */
- req->global_seqno = 0;
- req->signaling.wait.seqno = 0;
- req->file_priv = NULL;
- req->batch = NULL;
- req->capture_list = NULL;
- req->waitboost = false;
+ rq->global_seqno = 0;
+ rq->signaling.wait.seqno = 0;
+ rq->file_priv = NULL;
+ rq->batch = NULL;
+ rq->capture_list = NULL;
+ rq->waitboost = false;
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
- * i915_add_request() call can't fail. Note that the reserve may need
+ * i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
- req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
+ rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
/*
* Record the position of the start of the request so that
@@ -769,30 +781,30 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- req->head = req->ring->emit;
+ rq->head = rq->ring->emit;
/* Unconditionally invalidate GPU caches and TLBs. */
- ret = engine->emit_flush(req, EMIT_INVALIDATE);
+ ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
goto err_unwind;
- ret = engine->request_alloc(req);
+ ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
/* Check that we didn't interrupt ourselves with a new request */
- GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
- return req;
+ GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ return rq;
err_unwind:
- req->ring->emit = req->head;
+ rq->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&req->active_list));
- GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
- GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+ GEM_BUG_ON(!list_empty(&rq->active_list));
+ GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
+ GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
- kmem_cache_free(dev_priv->requests, req);
+ kmem_cache_free(i915->requests, rq);
err_unreserve:
unreserve_engine(engine);
err_unpin:
@@ -801,15 +813,14 @@ err_unpin:
}
static int
-i915_gem_request_await_request(struct drm_i915_gem_request *to,
- struct drm_i915_gem_request *from)
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
- if (i915_gem_request_completed(from))
+ if (i915_request_completed(from))
return 0;
if (to->engine->schedule) {
@@ -832,7 +843,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
GEM_BUG_ON(!from->engine->semaphore.signal);
- seqno = i915_gem_request_global_seqno(from);
+ seqno = i915_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
@@ -856,14 +867,14 @@ await_dma_fence:
}
int
-i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
- struct dma_fence *fence)
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
struct dma_fence **child = &fence;
unsigned int nchild = 1;
int ret;
- /* Note that if the fence-array was created in signal-on-any mode,
+ /*
+ * Note that if the fence-array was created in signal-on-any mode,
* we should *not* decompose it into its individual fences. However,
* we don't currently store which mode the fence-array is operating
* in. Fortunately, the only user of signal-on-any is private to
@@ -885,40 +896,39 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
/*
* Requests on the same timeline are explicitly ordered, along
- * with their dependencies, by i915_add_request() which ensures
+ * with their dependencies, by i915_request_add() which ensures
* that requests are submitted in-order through each ring.
*/
- if (fence->context == req->fence.context)
+ if (fence->context == rq->fence.context)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != req->i915->mm.unordered_timeline &&
- intel_timeline_sync_is_later(req->timeline, fence))
+ if (fence->context != rq->i915->mm.unordered_timeline &&
+ intel_timeline_sync_is_later(rq->timeline, fence))
continue;
if (dma_fence_is_i915(fence))
- ret = i915_gem_request_await_request(req,
- to_request(fence));
+ ret = i915_request_await_request(rq, to_request(fence));
else
- ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
+ ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != req->i915->mm.unordered_timeline)
- intel_timeline_sync_set(req->timeline, fence);
+ if (fence->context != rq->i915->mm.unordered_timeline)
+ intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
return 0;
}
/**
- * i915_gem_request_await_object - set this request to (async) wait upon a bo
- *
+ * i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
* @obj: object which may be in use on another ring.
+ * @write: whether the wait is on behalf of a writer
*
* This code is meant to abstract object synchronization with the GPU.
* Conceptually we serialise writes between engines inside the GPU.
@@ -935,9 +945,9 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
- struct drm_i915_gem_object *obj,
- bool write)
+i915_request_await_object(struct i915_request *to,
+ struct drm_i915_gem_object *obj,
+ bool write)
{
struct dma_fence *excl;
int ret = 0;
@@ -952,7 +962,7 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
return ret;
for (i = 0; i < count; i++) {
- ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ ret = i915_request_await_dma_fence(to, shared[i]);
if (ret)
break;
@@ -968,7 +978,7 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
if (excl) {
if (ret == 0)
- ret = i915_gem_request_await_dma_fence(to, excl);
+ ret = i915_request_await_dma_fence(to, excl);
dma_fence_put(excl);
}
@@ -981,20 +991,21 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+void __i915_request_add(struct i915_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
- trace_i915_gem_request_add(request);
+ trace_i915_request_add(request);
- /* Make sure that no request gazumped us - if it was allocated after
- * our i915_gem_request_alloc() and called __i915_add_request() before
+ /*
+ * Make sure that no request gazumped us - if it was allocated after
+ * our i915_request_alloc() and called __i915_request_add() before
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
@@ -1020,7 +1031,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
- /* Record the position of the start of the breadcrumb so that
+ /*
+ * Record the position of the start of the breadcrumb so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
@@ -1029,7 +1041,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
- /* Seal the request and mark it as pending execution. Note that
+ /*
+ * Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
* hangcheck. Hence we apply the barrier to ensure that we do not
* see a more recent value in the hws than we are tracking.
@@ -1037,7 +1050,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev) {
+ if (prev && !i915_request_completed(prev)) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
if (engine->schedule)
@@ -1057,7 +1070,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;
- /* Let the backend know a new request has arrived that may need
+ /*
+ * Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
* request - i.e. we may want to preempt the current request in order
* to run a high priority dependency chain *before* we can execute this
@@ -1073,13 +1087,34 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /*
+ * In typical scenarios, we do not expect the previous request on
+ * the timeline to be still tracked by timeline->last_request if it
+ * has been completed. If the completed request is still here, that
+ * implies that request retirement is a long way behind submission,
+ * suggesting that we haven't been retiring frequently enough from
+ * the combination of retire-before-alloc, waiters and the background
+ * retirement worker. So if the last request on this timeline was
+ * already completed, do a catch up pass, flushing the retirement queue
+ * up to this client. Since we have now moved the heaviest operations
+ * during retirement onto secondary workers, such as freeing objects
+ * or contexts, retiring a bunch of requests is mostly list management
+ * (and cache misses), and so we should not be overly penalizing this
+ * client by performing excess work, though we may still be performing
+ * work on behalf of others -- but instead we should benefit from
+ * improved resource management. (Well, that's the theory at least.)
+ */
+ if (prev && i915_request_completed(prev))
+ i915_request_retire_upto(prev);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
unsigned long t;
- /* Cheaply and approximately convert from nanoseconds to microseconds.
+ /*
+ * Cheaply and approximately convert from nanoseconds to microseconds.
* The result and subsequent calculations are also defined in the same
* approximate microseconds units. The principal source of timing
* error here is from the simple truncation.
@@ -1107,10 +1142,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+static bool __i915_spin_request(const struct i915_request *rq,
u32 seqno, int state, unsigned long timeout_us)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
unsigned int irq, cpu;
GEM_BUG_ON(!seqno);
@@ -1129,7 +1164,8 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
return false;
- /* When waiting for high frequency requests, e.g. during synchronous
+ /*
+ * When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
@@ -1143,9 +1179,10 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
- return seqno == i915_gem_request_global_seqno(req);
+ return seqno == i915_request_global_seqno(rq);
- /* Seqno are meant to be ordered *before* the interrupt. If
+ /*
+ * Seqnos are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fixup coherency.
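The comment above motivates __i915_spin_request(): for short, high-frequency waits it is cheaper to poll the seqno briefly than to pay for interrupt setup and a sleep. Below is a minimal standalone sketch of that spin-then-sleep pattern using a plain flag, a time budget and a condition variable; it is not part of the patch, and the threading details are illustrative rather than the driver's breadcrumb machinery.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static atomic_bool done;

    static unsigned long now_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long)ts.tv_sec * 1000000UL +
               (unsigned long)ts.tv_nsec / 1000UL;
    }

    /* Poll the flag for up to timeout_us before giving up on the fast path. */
    static bool spin_for_completion(unsigned long timeout_us)
    {
        unsigned long end = now_us() + timeout_us;

        do {
            if (atomic_load_explicit(&done, memory_order_acquire))
                return true;
        } while (now_us() < end);

        return false;
    }

    static void wait_for_completion(void)
    {
        if (spin_for_completion(5))         /* optimistic short spin... */
            return;

        pthread_mutex_lock(&lock);          /* ...then a real blocking wait */
        while (!atomic_load_explicit(&done, memory_order_acquire))
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        struct timespec delay = { 0, 2 * 1000 * 1000 };     /* "GPU" takes 2ms */

        nanosleep(&delay, NULL);
        pthread_mutex_lock(&lock);
        atomic_store_explicit(&done, true, memory_order_release);
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return arg;
    }

    int main(void)
    {
        pthread_t thread;

        pthread_create(&thread, NULL, worker, NULL);
        wait_for_completion();
        pthread_join(thread, NULL);
        puts("request completed");
        return 0;
    }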
@@ -1165,7 +1202,7 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
-static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
+static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
return false;
@@ -1176,12 +1213,12 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
}
/**
- * i915_wait_request - wait until execution of request has finished
- * @req: the request to wait upon
+ * i915_request_wait - wait until execution of request has finished
+ * @rq: the request to wait upon
* @flags: how to wait
* @timeout: how long to wait in jiffies
*
- * i915_wait_request() waits for the request to be completed, for a
+ * i915_request_wait() waits for the request to be completed, for a
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
@@ -1194,13 +1231,13 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
* May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
* pending before the request completes.
*/
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
+ wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
DEFINE_WAIT_FUNC(reset, default_wake_function);
DEFINE_WAIT_FUNC(exec, default_wake_function);
struct intel_wait wait;
@@ -1208,33 +1245,33 @@ long i915_wait_request(struct drm_i915_gem_request *req,
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
GEM_BUG_ON(debug_locks &&
- !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
GEM_BUG_ON(timeout < 0);
- if (i915_gem_request_completed(req))
+ if (i915_request_completed(rq))
return timeout;
if (!timeout)
return -ETIME;
- trace_i915_gem_request_wait_begin(req, flags);
+ trace_i915_request_wait_begin(rq, flags);
- add_wait_queue(&req->execute, &exec);
+ add_wait_queue(&rq->execute, &exec);
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, req);
+ intel_wait_init(&wait, rq);
restart:
do {
set_current_state(state);
- if (intel_wait_update_request(&wait, req))
+ if (intel_wait_update_request(&wait, rq))
break;
if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(req))
+ __i915_wait_request_check_and_reset(rq))
continue;
if (signal_pending_state(state, current)) {
@@ -1251,22 +1288,23 @@ restart:
} while (1);
GEM_BUG_ON(!intel_wait_has_seqno(&wait));
- GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
/* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(req, wait.seqno, state, 5))
+ if (__i915_spin_request(rq, wait.seqno, state, 5))
goto complete;
set_current_state(state);
- if (intel_engine_add_wait(req->engine, &wait))
- /* In order to check that we haven't missed the interrupt
+ if (intel_engine_add_wait(rq->engine, &wait))
+ /*
+ * In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
* coherent check on the seqno before we sleep.
*/
goto wakeup;
if (flags & I915_WAIT_LOCKED)
- __i915_wait_request_check_and_reset(req);
+ __i915_wait_request_check_and_reset(rq);
for (;;) {
if (signal_pending_state(state, current)) {
@@ -1282,21 +1320,23 @@ restart:
timeout = io_schedule_timeout(timeout);
if (intel_wait_complete(&wait) &&
- intel_wait_check_request(&wait, req))
+ intel_wait_check_request(&wait, rq))
break;
set_current_state(state);
wakeup:
- /* Carefully check if the request is complete, giving time
+ /*
+ * Carefully check if the request is complete, giving time
* for the seqno to be visible following the interrupt.
* We also have to check in case we are kicked by the GPU
* reset in order to drop the struct_mutex.
*/
- if (__i915_request_irq_complete(req))
+ if (__i915_request_irq_complete(rq))
break;
- /* If the GPU is hung, and we hold the lock, reset the GPU
+ /*
+ * If the GPU is hung, and we hold the lock, reset the GPU
* and then check for completion. On a full reset, the engine's
* HW seqno will be advanced past us and we are complete.
* If we do a partial reset, we have to wait for the GPU to
@@ -1307,33 +1347,33 @@ wakeup:
* itself, or indirectly by recovering the GPU).
*/
if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(req))
+ __i915_wait_request_check_and_reset(rq))
continue;
/* Only spin if we know the GPU is processing this request */
- if (__i915_spin_request(req, wait.seqno, state, 2))
+ if (__i915_spin_request(rq, wait.seqno, state, 2))
break;
- if (!intel_wait_check_request(&wait, req)) {
- intel_engine_remove_wait(req->engine, &wait);
+ if (!intel_wait_check_request(&wait, rq)) {
+ intel_engine_remove_wait(rq->engine, &wait);
goto restart;
}
}
- intel_engine_remove_wait(req->engine, &wait);
+ intel_engine_remove_wait(rq->engine, &wait);
complete:
__set_current_state(TASK_RUNNING);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(errq, &reset);
- remove_wait_queue(&req->execute, &exec);
- trace_i915_gem_request_wait_end(req);
+ remove_wait_queue(&rq->execute, &exec);
+ trace_i915_request_wait_end(rq);
return timeout;
}
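As a usage illustration of the return convention documented above (remaining jiffies on success, a negative error code otherwise), a hedged caller sketch follows; the 100 ms budget and the wrapper name are invented for the example, and the flags must match whether struct_mutex is held, as the lockdep assertion above enforces.

static int example_wait_on_request(struct i915_request *rq)
{
	long remaining;

	/* No I915_WAIT_LOCKED here, so the caller must not hold struct_mutex. */
	remaining = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				      msecs_to_jiffies(100));
	if (remaining == -ETIME)
		return -ETIMEDOUT;	/* still busy after 100ms */
	if (remaining < 0)
		return remaining;	/* e.g. -EINTR on a pending signal */

	return 0;			/* completed with @remaining jiffies to spare */
}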
static void engine_retire_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request, *next;
+ struct i915_request *request, *next;
u32 seqno = intel_engine_get_seqno(engine);
LIST_HEAD(retire);
@@ -1348,24 +1388,24 @@ static void engine_retire_requests(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next, &retire, link)
- i915_gem_request_retire(request);
+ i915_request_retire(request);
}
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
- if (!dev_priv->gt.active_requests)
+ if (!i915->gt.active_requests)
return;
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, i915, id)
engine_retire_requests(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
-#include "selftests/i915_gem_request.c"
+#include "selftests/i915_request.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_request.h
index 2236e9188c5c..7d6eb82eeb91 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2008-2015 Intel Corporation
+ * Copyright © 2008-2018 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,8 +22,8 @@
*
*/
-#ifndef I915_GEM_REQUEST_H
-#define I915_GEM_REQUEST_H
+#ifndef I915_REQUEST_H
+#define I915_REQUEST_H
#include <linux/dma-fence.h>
@@ -34,18 +34,18 @@
struct drm_file;
struct drm_i915_gem_object;
-struct drm_i915_gem_request;
+struct i915_request;
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
u32 seqno;
};
struct intel_signal_node {
- struct rb_node node;
struct intel_wait wait;
+ struct list_head link;
};
struct i915_dependency {
@@ -57,7 +57,12 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
-/* Requests exist in a complex web of interdependencies. Each request
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
* has to wait for some other request to complete before it is ready to be run
* (e.g. we have to wait until the pixels have been rendered into a texture
* before we can copy from it). We track the readiness of a request in terms
@@ -81,8 +86,8 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
-struct i915_gem_capture_list {
- struct i915_gem_capture_list *next;
+struct i915_capture_list {
+ struct i915_capture_list *next;
struct i915_vma *vma;
};
@@ -106,7 +111,7 @@ struct i915_gem_capture_list {
*
* The requests are reference counted.
*/
-struct drm_i915_gem_request {
+struct i915_request {
struct dma_fence fence;
spinlock_t lock;
@@ -120,7 +125,7 @@ struct drm_i915_gem_request {
* it persists while any request is linked to it. Requests themselves
* are also refcounted, so the request will only be freed when the last
* reference to it is dismissed, and the code in
- * i915_gem_request_free() will then decrement the refcount on the
+ * i915_request_free() will then decrement the refcount on the
* context.
*/
struct i915_gem_context *ctx;
@@ -129,7 +134,8 @@ struct drm_i915_gem_request {
struct intel_timeline *timeline;
struct intel_signal_node signaling;
- /* Fences for the various phases in the request's lifetime.
+ /*
+ * Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
* dependencies. When it is signaled, the request is ready to run.
@@ -139,7 +145,8 @@ struct drm_i915_gem_request {
wait_queue_entry_t submitq;
wait_queue_head_t execute;
- /* A list of everyone we wait upon, and everyone who waits upon us.
+ /*
+ * A list of everyone we wait upon, and everyone who waits upon us.
* Even though we will not be submitted to the hardware before the
* submit fence is signaled (it waits for all external events as well
* as our own requests), the scheduler still needs to know the
@@ -150,7 +157,8 @@ struct drm_i915_gem_request {
struct i915_priotree priotree;
struct i915_dependency dep;
- /** GEM sequence number associated with this request on the
+ /**
+ * GEM sequence number associated with this request on the
* global execution timeline. It is zero when the request is not
* on the HW queue (i.e. not on the engine timeline list).
* Its value is guarded by the timeline spinlock.
@@ -180,12 +188,13 @@ struct drm_i915_gem_request {
* error state dump only).
*/
struct i915_vma *batch;
- /** Additional buffers requested by userspace to be captured upon
+ /**
+ * Additional buffers requested by userspace to be captured upon
* a GPU hang. The vma/obj on this list are protected by their
* active reference - all objects on this list must also be
* on the active_list (of their final request).
*/
- struct i915_gem_capture_list *capture_list;
+ struct i915_capture_list *capture_list;
struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
@@ -213,40 +222,40 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+struct i915_request * __must_check
+i915_request_alloc(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+void i915_request_retire_upto(struct i915_request *rq);
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+ BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
- return container_of(fence, struct drm_i915_gem_request, fence);
+ return container_of(fence, struct i915_request, fence);
}
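The BUILD_BUG_ON above is what makes the "NULL fence equals NULL request" assumption safe: with the fence embedded at offset zero, container_of() degenerates to a plain cast. A self-contained illustration with invented stand-in types (fake_fence/fake_request are not driver types):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_fence { unsigned int seqno; };
struct fake_request {
	struct fake_fence fence;	/* must stay the first member */
	int priority;
};

#define to_fake_request(f) \
	((struct fake_request *)((char *)(f) - offsetof(struct fake_request, fence)))

int main(void)
{
	struct fake_request rq = { .fence = { .seqno = 1 }, .priority = 0 };

	/* With offsetof(..., fence) == 0 the conversion is a no-op cast,
	 * which is the only reason a NULL fence maps back to a NULL request. */
	assert((void *)to_fake_request(&rq.fence) == (void *)&rq);
	printf("offsetof(fence) = %zu\n", offsetof(struct fake_request, fence));
	return 0;
}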
-static inline struct drm_i915_gem_request *
-i915_gem_request_get(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get(struct i915_request *rq)
{
- return to_request(dma_fence_get(&req->fence));
+ return to_request(dma_fence_get(&rq->fence));
}
-static inline struct drm_i915_gem_request *
-i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get_rcu(struct i915_request *rq)
{
- return to_request(dma_fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&rq->fence));
}
static inline void
-i915_gem_request_put(struct drm_i915_gem_request *req)
+i915_request_put(struct i915_request *rq)
{
- dma_fence_put(&req->fence);
+ dma_fence_put(&rq->fence);
}
/**
- * i915_gem_request_global_seqno - report the current global seqno
+ * i915_request_global_seqno - report the current global seqno
* @request: the request
*
* A request is assigned a global seqno only when it is on the hardware
@@ -264,34 +273,28 @@ i915_gem_request_put(struct drm_i915_gem_request *req)
* after the read, it is indeed complete).
*/
static u32
-i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
+i915_request_global_seqno(const struct i915_request *request)
{
return READ_ONCE(request->global_seqno);
}
-int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
+int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
-int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
- struct dma_fence *fence);
+int i915_request_await_dma_fence(struct i915_request *rq,
+ struct dma_fence *fence);
-void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
-#define i915_add_request(req) \
- __i915_add_request(req, false)
+void __i915_request_add(struct i915_request *rq, bool flush_caches);
+#define i915_request_add(rq) \
+ __i915_request_add(rq, false)
-void __i915_gem_request_submit(struct drm_i915_gem_request *request);
-void i915_gem_request_submit(struct drm_i915_gem_request *request);
+void __i915_request_submit(struct i915_request *request);
+void i915_request_submit(struct i915_request *request);
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void __i915_request_unsubmit(struct i915_request *request);
+void i915_request_unsubmit(struct i915_request *request);
-struct intel_rps_client;
-#define NO_WAITBOOST ERR_PTR(-1)
-#define IS_RPS_CLIENT(p) (!IS_ERR(p))
-#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
__attribute__((nonnull(1)));
@@ -310,47 +313,48 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
+__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
GEM_BUG_ON(!seqno);
- return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
- seqno == i915_gem_request_global_seqno(req);
+ return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+ seqno == i915_request_global_seqno(rq);
}
-static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+static inline bool i915_request_completed(const struct i915_request *rq)
{
u32 seqno;
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
- return __i915_gem_request_completed(req, seqno);
+ return __i915_request_completed(rq, seqno);
}
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+static inline bool i915_request_started(const struct i915_request *rq)
{
u32 seqno;
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+ return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
seqno - 1);
}
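Both helpers above hinge on i915_seqno_passed() (referenced in the hunk header) and on reading the global seqno exactly once before comparing. A standalone restatement of that ordering, assuming the usual signed-difference definition of the wrap-safe comparison; the names below are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "a is at or after b" for 32-bit sequence numbers. */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* A request with no global seqno is not on the HW timeline yet, hence not complete. */
static bool request_completed(uint32_t engine_seqno, uint32_t global_seqno)
{
	if (!global_seqno)
		return false;
	return seqno_passed(engine_seqno, global_seqno);
}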
static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
{
- const struct drm_i915_gem_request *rq =
- container_of(pt, const struct drm_i915_gem_request, priotree);
+ const struct i915_request *rq =
+ container_of(pt, const struct i915_request, priotree);
- return i915_gem_request_completed(rq);
+ return i915_request_completed(rq);
}
-/* We treat requests as fences. This is not be to confused with our
+void i915_retire_requests(struct drm_i915_private *i915);
+
+/*
+ * We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
* We use the fences to synchronize access from the CPU with activity on the
* GPU, for example, we should not rewrite an object's PTE whilst the GPU
@@ -380,16 +384,16 @@ static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
struct i915_gem_active;
typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
- struct drm_i915_gem_request *);
+ struct i915_request *);
struct i915_gem_active {
- struct drm_i915_gem_request __rcu *request;
+ struct i915_request __rcu *request;
struct list_head link;
i915_gem_retire_fn retire;
};
void i915_gem_retire_noop(struct i915_gem_active *,
- struct drm_i915_gem_request *request);
+ struct i915_request *request);
/**
* init_request_active - prepares the activity tracker for use
@@ -421,7 +425,7 @@ init_request_active(struct i915_gem_active *active,
*/
static inline void
i915_gem_active_set(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
list_move(&active->link, &request->active_list);
rcu_assign_pointer(active->request, request);
@@ -446,10 +450,11 @@ i915_gem_active_set_retire_fn(struct i915_gem_active *active,
active->retire = fn ?: i915_gem_retire_noop;
}
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
- /* Inside the error capture (running with the driver in an unknown
+ /*
+ * Inside the error capture (running with the driver in an unknown
* state), we want to bend the rules slightly (a lot).
*
* Work is in progress to make it safer, in the meantime this keeps
@@ -466,7 +471,7 @@ __i915_gem_active_peek(const struct i915_gem_active *active)
* It does not obtain a reference on the request for the caller, so the caller
* must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
return rcu_dereference_protected(active->request,
@@ -481,13 +486,13 @@ i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
* still active, or NULL. It does not obtain a reference on the request
* for the caller, so the caller must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = i915_gem_active_raw(active, mutex);
- if (!request || i915_gem_request_completed(request))
+ if (!request || i915_request_completed(request))
return NULL;
return request;
@@ -500,10 +505,10 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
* i915_gem_active_get() returns a reference to the active request, or NULL
* if the active tracker is idle. The caller must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
- return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+ return i915_request_get(i915_gem_active_peek(active, mutex));
}
/**
@@ -514,10 +519,11 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
* if the active tracker is idle. The caller must hold the RCU read lock, but
* the returned pointer is safe to use outside of RCU.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
- /* Performing a lockless retrieval of the active request is super
+ /*
+ * Performing a lockless retrieval of the active request is super
* tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
* slab of request objects will not be freed whilst we hold the
* RCU read lock. It does not guarantee that the request itself
@@ -525,13 +531,13 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
*
* Thread A Thread B
*
- * req = active.request
- * retire(req) -> free(req);
- * (req is now first on the slab freelist)
+ * rq = active.request
+ * retire(rq) -> free(rq);
+ * (rq is now first on the slab freelist)
* active.request = NULL
*
- * req = new submission on a new object
- * ref(req)
+ * rq = new submission on a new object
+ * ref(rq)
*
* To prevent the request from being reused whilst the caller
* uses it, we take a reference like normal. Whilst acquiring
@@ -560,32 +566,34 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
*
* It is then imperative that we do not zero the request on
* reallocation, so that we can chase the dangling pointers!
- * See i915_gem_request_alloc().
+ * See i915_request_alloc().
*/
do {
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
+ if (!request || i915_request_completed(request))
return NULL;
- /* An especially silly compiler could decide to recompute the
- * result of i915_gem_request_completed, more specifically
+ /*
+ * An especially silly compiler could decide to recompute the
+ * result of i915_request_completed, more specifically
* re-emit the load for request->fence.seqno. A race would catch
* a later seqno value, which could flip the result from true to
* false. Which means part of the instructions below might not
* be executed, while later on instructions are executed. Due to
* barriers within the refcounting the inconsistency can't reach
- * past the call to i915_gem_request_get_rcu, but not executing
- * that while still executing i915_gem_request_put() creates
+ * past the call to i915_request_get_rcu, but not executing
+ * that while still executing i915_request_put() creates
* havoc enough. Prevent this with a compiler barrier.
*/
barrier();
- request = i915_gem_request_get_rcu(request);
+ request = i915_request_get_rcu(request);
- /* What stops the following rcu_access_pointer() from occurring
- * before the above i915_gem_request_get_rcu()? If we were
+ /*
+ * What stops the following rcu_access_pointer() from occurring
+ * before the above i915_request_get_rcu()? If we were
* to read the value before pausing to get the reference to
* the request, we may not notice a change in the active
* tracker.
@@ -599,9 +607,9 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
+ * i915_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_gem_request_get_rcu()
+ * when successful. That is, if i915_request_get_rcu()
* returns the request (and so with the reference counted
* incremented) then the following read for rcu_access_pointer()
* must occur after the atomic operation and so confirm
@@ -613,7 +621,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
if (!request || request == rcu_access_pointer(active->request))
return rcu_pointer_handoff(request);
- i915_gem_request_put(request);
+ i915_request_put(request);
} while (1);
}
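Stripped of the driver specifics, the retry loop above is the usual way to take a reference out of a SLAB_TYPESAFE_BY_RCU slab: dereference, attempt a conditional refcount grab, then re-check the slot before handing the pointer off. A hedged generic sketch — struct object, its kref member and object_put() are invented for illustration:

static struct object *object_get_rcu(struct object __rcu **slot)
{
	struct object *obj;

	do {
		obj = rcu_dereference(*slot);
		if (!obj)
			return NULL;

		/* Fails if the object is already on its way to being freed. */
		if (!kref_get_unless_zero(&obj->ref))
			continue;

		/*
		 * The slab may have recycled the memory for a brand new object
		 * between the dereference and the refcount bump; only a re-read
		 * showing the slot still points at @obj validates the reference.
		 */
		if (obj == rcu_access_pointer(*slot))
			return obj;

		object_put(obj);	/* raced with reuse, drop and retry */
	} while (1);
}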
@@ -625,12 +633,12 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_gem_request_put().
+ * The reference should be freed with i915_request_put().
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
rcu_read_lock();
request = __i915_gem_active_get_rcu(active);
@@ -670,7 +678,7 @@ i915_gem_active_isset(const struct i915_gem_active *active)
* can then wait upon the request, and afterwards release our reference,
* free of any locking.
*
- * This function wraps i915_wait_request(), see it for the full details on
+ * This function wraps i915_request_wait(), see it for the full details on
* the arguments.
*
* Returns 0 if successful, or a negative error code.
@@ -678,13 +686,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(request);
+ ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
}
return ret < 0 ? ret : 0;
@@ -703,14 +711,14 @@ static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
return 0;
- ret = i915_wait_request(request,
+ ret = i915_request_wait(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
@@ -727,4 +735,4 @@ i915_gem_active_retire(struct i915_gem_active *active,
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
-#endif /* I915_GEM_REQUEST_H */
+#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 0087acf731a8..58f8d0cc125c 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -86,7 +86,7 @@ struct i915_syncmap {
/**
* i915_syncmap_init -- initialise the #i915_syncmap
- * @root - pointer to the #i915_syncmap
+ * @root: pointer to the #i915_syncmap
*/
void i915_syncmap_init(struct i915_syncmap **root)
{
@@ -139,9 +139,9 @@ static inline bool seqno_later(u32 a, u32 b)
/**
* i915_syncmap_is_later -- compare against the last known sync point
- * @root - pointer to the #i915_syncmap
- * @id - the context id (other timeline) we are synchronising to
- * @seqno - the sequence number along the other timeline
+ * @root: pointer to the #i915_syncmap
+ * @id: the context id (other timeline) we are synchronising to
+ * @seqno: the sequence number along the other timeline
*
* If we have already synchronised this @root timeline with another (@id) then
* we can omit any repeated or earlier synchronisation requests. If the two
@@ -339,9 +339,9 @@ found:
/**
* i915_syncmap_set -- mark the most recent syncpoint between contexts
- * @root - pointer to the #i915_syncmap
- * @id - the context id (other timeline) we have synchronised to
- * @seqno - the sequence number along the other timeline
+ * @root: pointer to the #i915_syncmap
+ * @id: the context id (other timeline) we have synchronised to
+ * @seqno: the sequence number along the other timeline
*
* When we synchronise this @root timeline with another (@id), we also know
* that we have synchronised with all previous seqnos along that timeline. If
@@ -382,7 +382,7 @@ static void __sync_free(struct i915_syncmap *p)
/**
* i915_syncmap_free -- free all memory associated with the syncmap
- * @root - pointer to the #i915_syncmap
+ * @root: pointer to the #i915_syncmap
*
* Either when the timeline is to be freed and we no longer need the sync
* point tracking, or when the fences are all known to be signaled and the
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e1169c02eb2b..408827bf5d96 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -586,8 +586,7 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct drm_i915_gem_request *to,
- struct drm_i915_gem_request *from),
+ TP_PROTO(struct i915_request *to, struct i915_request *from),
TP_ARGS(to, from),
TP_STRUCT__entry(
@@ -610,9 +609,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->seqno)
);
-TRACE_EVENT(i915_gem_request_queue,
- TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
- TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_queue,
+ TP_PROTO(struct i915_request *rq, u32 flags),
+ TP_ARGS(rq, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -624,11 +623,11 @@ TRACE_EVENT(i915_gem_request_queue,
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
@@ -637,9 +636,9 @@ TRACE_EVENT(i915_gem_request_queue,
__entry->seqno, __entry->flags)
);
-DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req),
+DECLARE_EVENT_CLASS(i915_request,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field(u32, dev)
@@ -651,12 +650,12 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global = req->global_seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global = rq->global_seqno;
),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
@@ -664,26 +663,25 @@ DECLARE_EVENT_CLASS(i915_gem_request,
__entry->seqno, __entry->global)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_add,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
-DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_submit,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_execute,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-DECLARE_EVENT_CLASS(i915_gem_request_hw,
- TP_PROTO(struct drm_i915_gem_request *req,
- unsigned int port),
- TP_ARGS(req, port),
+DECLARE_EVENT_CLASS(i915_request_hw,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port),
TP_STRUCT__entry(
__field(u32, dev)
@@ -696,14 +694,14 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global_seqno = req->global_seqno;
- __entry->port = port;
- ),
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global_seqno = rq->global_seqno;
+ __entry->port = port;
+ ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
__entry->dev, __entry->hw_id, __entry->ring,
@@ -711,34 +709,34 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
__entry->global_seqno, __entry->port)
);
-DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
- TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
- TP_ARGS(req, port)
+DEFINE_EVENT(i915_request_hw, i915_request_in,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_out,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
-trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
+trace_i915_request_submit(struct i915_request *rq)
{
}
static inline void
-trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
+trace_i915_request_execute(struct i915_request *rq)
{
}
static inline void
-trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
+trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}
static inline void
-trace_i915_gem_request_out(struct drm_i915_gem_request *req)
+trace_i915_request_out(struct i915_request *rq)
{
}
#endif
@@ -767,14 +765,14 @@ TRACE_EVENT(intel_engine_notify,
__entry->waiters)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_retire,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
- TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_wait_begin,
+ TP_PROTO(struct i915_request *rq, unsigned int flags),
+ TP_ARGS(rq, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -793,12 +791,12 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global = req->global_seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global = rq->global_seqno;
__entry->flags = flags;
),
@@ -808,9 +806,9 @@ TRACE_EVENT(i915_gem_request_wait_begin,
!!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_wait_end,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
TRACE_EVENT(i915_flip_request,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e7c48f45dc..4bda3bd29bf5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -31,8 +31,7 @@
#include <drm/drm_gem.h>
static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
+i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index fd5b84904f7c..8c5022095418 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -32,8 +32,8 @@
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
enum i915_cache_level;
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index d452c327dc1d..e9fb692076d7 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -188,13 +188,14 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
/**
* intel_crtc_destroy_state - destroy crtc state
* @crtc: drm crtc
+ * @state: the state to destroy
*
* Destroys the crtc state (both common and Intel-specific) for the
* specified crtc.
*/
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_crtc_state *state)
{
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
@@ -202,7 +203,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev_priv: i915 device
- * @crtc: intel crtc
+ * @intel_crtc: intel crtc
* @crtc_state: incoming crtc_state to validate and setup scalers
*
* This function sets up scalers based on staged scaling requests for
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 0ee32275994a..7481ce85746b 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->vma = NULL;
+ intel_state->flags = 0;
return state;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 522d54fecb53..709d6ca68074 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -704,7 +704,7 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
- } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) {
+ } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
- return NULL;
-
/* MST */
if (pipe >= 0) {
+ if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ return NULL;
+
encoder = dev_priv->av_enc_map[pipe];
/*
* during bootup, the audio driver may not know it is
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4e74aa2f16bc..c5c7530ba157 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -391,7 +391,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
bool alternate)
{
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 2:
return alternate ? 66667 : 48000;
case 3:
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
return 0;
}
+/*
+ * Get the length of the deassert fragment prefixed to a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+ const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ int index, len;
+
+ if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ return 0;
+
+ /* index = 1 to skip sequence byte */
+ for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+ switch (data[index]) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ return index == 1 ? 0 : index;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 5; /* 1 byte for operand + uint32 */
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
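A worked example of the parse above, with illustrative byte values; the element ids and operand sizes follow the v1 layout the function assumes (one sequence byte, then elements whose first byte is the element id):

/*
 * offset:  0          1  2  3    4  5  6  7  8    9 ...
 * data:    [INIT_OTP]  [GPIO a b] [DELAY d0..d3]   [SEND_PKT ...] ... [END]
 *
 * The loop starts at offset 1, steps over the 3-byte GPIO element and the
 * 5-byte DELAY element, and returns 9 on hitting the first SEND_PKT.
 * fixup_mipi_sequences() below then:
 *  - kmemdup()s bytes 0..9, rewrites byte 0 to MIPI_SEQ_DEASSERT_RESET and
 *    byte 9 to MIPI_SEQ_ELEM_END, yielding a standalone deassert sequence;
 *  - overwrites data[8] (the last delay byte, already preserved in the copy)
 *    with MIPI_SEQ_INIT_OTP and points the init OTP sequence at data + 8,
 *    so the remaining init OTP sequence begins right at the SEND_PKT.
 */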
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+ u8 *init_otp;
+ int len;
+
+ /* Limit this to VLV for now. */
+ if (!IS_VALLEYVIEW(dev_priv))
+ return;
+
+ /* Limit this to v1 vid-mode sequences */
+ if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+ dev_priv->vbt.dsi.seq_version != 1)
+ return;
+
+ /* Only do this if there are otp and assert seqs and no deassert seq */
+ if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ return;
+
+ /* The deassert-sequence ends at the first DSI packet */
+ len = get_init_otp_deassert_fragment_len(dev_priv);
+ if (!len)
+ return;
+
+ DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+ /* Copy the fragment, update seq byte and terminate it */
+ init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.deassert_seq)
+ return;
+ dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ /* Use the copy for deassert */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ dev_priv->vbt.dsi.deassert_seq;
+ /* Replace the last byte of the fragment with init OTP seq byte */
+ init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+ /* And make MIPI_SEQ_INIT_OTP point to it */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;
+ fixup_mipi_sequences(dev_priv);
+
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
@@ -1611,6 +1693,29 @@ out:
}
/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
+ kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+ dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.dsi.data);
+ dev_priv->vbt.dsi.data = NULL;
+ kfree(dev_priv->vbt.dsi.pps);
+ dev_priv->vbt.dsi.pps = NULL;
+ kfree(dev_priv->vbt.dsi.config);
+ dev_priv->vbt.dsi.config = NULL;
+ kfree(dev_priv->vbt.dsi.deassert_seq);
+ dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
* intel_bios_is_tv_present - is integrated TV present in VBT
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index efbc627a2a25..1f79e7a47433 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -168,17 +168,21 @@ static void irq_enable(struct intel_engine_cs *engine)
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
- engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ if (engine->irq_enable) {
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock(&engine->i915->irq_lock);
+ }
}
static void irq_disable(struct intel_engine_cs *engine)
{
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
- engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ if (engine->irq_disable) {
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock(&engine->i915->irq_lock);
+ }
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@@ -243,6 +247,8 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
+ GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
+ wait->seqno));
RB_CLEAR_NODE(&wait->node);
wake_up_process(wait->tsk);
}
@@ -336,7 +342,8 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
lockdep_assert_held(&b->rb_lock);
GEM_BUG_ON(b->irq_wait == wait);
- /* This request is completed, so remove it from the tree, mark it as
+ /*
+ * This request is completed, so remove it from the tree, mark it as
* complete, and *then* wake up the associated task. N.B. when the
* task wakes up, it will find the empty rb_node, discern that it
* has already been removed from the tree and skip the serialisation
@@ -347,7 +354,8 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
rb_erase(&wait->node, &b->waiters);
RB_CLEAR_NODE(&wait->node);
- wake_up_process(wait->tsk); /* implicit smp_wmb() */
+ if (wait->tsk->state != TASK_RUNNING)
+ wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
@@ -588,36 +596,6 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
spin_unlock_irq(&b->rb_lock);
}
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
- return intel_wait_check_request(&request->signaling.wait, request);
-}
-
-static bool signal_complete(const struct drm_i915_gem_request *request)
-{
- if (!request)
- return false;
-
- /* If another process served as the bottom-half it may have already
- * signalled that this wait is already completed.
- */
- if (intel_wait_complete(&request->signaling.wait))
- return signal_valid(request);
-
- /* Carefully check if the request is complete, giving time for the
- * seqno to be visible or if the GPU hung.
- */
- if (__i915_request_irq_complete(request))
- return true;
-
- return false;
-}
-
-static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
-{
- return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
-}
-
static void signaler_set_rtpriority(void)
{
struct sched_param param = { .sched_priority = 1 };
@@ -625,78 +603,26 @@ static void signaler_set_rtpriority(void)
sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
-static void __intel_engine_remove_signal(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- lockdep_assert_held(&b->rb_lock);
-
- /*
- * Wake up all other completed waiters and select the
- * next bottom-half for the next user interrupt.
- */
- __intel_engine_remove_wait(engine, &request->signaling.wait);
-
- /*
- * Find the next oldest signal. Note that as we have
- * not been holding the lock, another client may
- * have installed an even older signal than the one
- * we just completed - so double check we are still
- * the oldest before picking the next one.
- */
- if (request->signaling.wait.seqno) {
- if (request == rcu_access_pointer(b->first_signal)) {
- struct rb_node *rb = rb_next(&request->signaling.node);
- rcu_assign_pointer(b->first_signal,
- rb ? to_signaler(rb) : NULL);
- }
-
- rb_erase(&request->signaling.node, &b->signals);
- request->signaling.wait.seqno = 0;
- }
-}
-
-static struct drm_i915_gem_request *
-get_first_signal_rcu(struct intel_breadcrumbs *b)
-{
- /*
- * See the big warnings for i915_gem_active_get_rcu() and similarly
- * for dma_fence_get_rcu_safe() that explain the intricacies involved
- * here with defeating CPU/compiler speculation and enforcing
- * the required memory barriers.
- */
- do {
- struct drm_i915_gem_request *request;
-
- request = rcu_dereference(b->first_signal);
- if (request)
- request = i915_gem_request_get_rcu(request);
-
- barrier();
-
- if (!request || request == rcu_access_pointer(b->first_signal))
- return rcu_pointer_handoff(request);
-
- i915_gem_request_put(request);
- } while (1);
-}
-
static int intel_breadcrumbs_signaler(void *arg)
{
struct intel_engine_cs *engine = arg;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct drm_i915_gem_request *request;
+ struct i915_request *rq, *n;
/* Install ourselves with high priority to reduce signalling latency */
signaler_set_rtpriority();
do {
bool do_schedule = true;
+ LIST_HEAD(list);
+ u32 seqno;
set_current_state(TASK_INTERRUPTIBLE);
+ if (list_empty(&b->signals))
+ goto sleep;
- /* We are either woken up by the interrupt bottom-half,
+ /*
+ * We are either woken up by the interrupt bottom-half,
* or by a client adding a new signaller. In both cases,
* the GPU seqno may have advanced beyond our oldest signal.
* If it has, propagate the signal, remove the waiter and
@@ -704,24 +630,45 @@ static int intel_breadcrumbs_signaler(void *arg)
* need to wait for a new interrupt from the GPU or for
* a new client.
*/
- rcu_read_lock();
- request = get_first_signal_rcu(b);
- rcu_read_unlock();
- if (signal_complete(request)) {
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &request->fence.flags)) {
- local_bh_disable();
- dma_fence_signal(&request->fence);
- local_bh_enable(); /* kick start the tasklets */
- }
+ seqno = intel_engine_get_seqno(engine);
+
+ spin_lock_irq(&b->rb_lock);
+ list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
+ u32 this = rq->signaling.wait.seqno;
- if (READ_ONCE(request->signaling.wait.seqno)) {
- spin_lock_irq(&b->rb_lock);
- __intel_engine_remove_signal(engine, request);
- spin_unlock_irq(&b->rb_lock);
+ GEM_BUG_ON(!rq->signaling.wait.seqno);
+
+ if (!i915_seqno_passed(seqno, this))
+ break;
+
+ if (likely(this == i915_request_global_seqno(rq))) {
+ __intel_engine_remove_wait(engine,
+ &rq->signaling.wait);
+
+ rq->signaling.wait.seqno = 0;
+ __list_del_entry(&rq->signaling.link);
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags)) {
+ list_add_tail(&rq->signaling.link,
+ &list);
+ i915_request_get(rq);
+ }
}
+ }
+ spin_unlock_irq(&b->rb_lock);
+
+ if (!list_empty(&list)) {
+ local_bh_disable();
+ list_for_each_entry_safe(rq, n, &list, signaling.link) {
+ dma_fence_signal(&rq->fence);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_request_put(rq);
+ }
+ local_bh_enable(); /* kick start the tasklets */
- /* If the engine is saturated we may be continually
+ /*
+ * If the engine is saturated we may be continually
* processing completed requests. This angers the
* NMI watchdog if we never let anything else
* have access to the CPU. Let's pretend to be nice
@@ -730,9 +677,19 @@ static int intel_breadcrumbs_signaler(void *arg)
*/
do_schedule = need_resched();
}
- i915_gem_request_put(request);
if (unlikely(do_schedule)) {
+ /* Before we sleep, check for a missed seqno */
+ if (current->state & TASK_NORMAL &&
+ !list_empty(&b->signals) &&
+ engine->irq_seqno_barrier &&
+ test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted)) {
+ engine->irq_seqno_barrier(engine);
+ intel_engine_wakeup(engine);
+ }
+
+sleep:
if (kthread_should_park())
kthread_parkme();
@@ -747,14 +704,40 @@ static int intel_breadcrumbs_signaler(void *arg)
return 0;
}
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
- bool wakeup)
+static void insert_signal(struct intel_breadcrumbs *b,
+ struct i915_request *request,
+ const u32 seqno)
+{
+ struct i915_request *iter;
+
+ lockdep_assert_held(&b->rb_lock);
+
+ /*
+ * A reasonable assumption is that we are called to add signals
+ * in sequence, as the requests are submitted for execution and
+ * assigned a global_seqno. This will be the case for the majority
+ * of internally generated signals (inter-engine signaling).
+ *
+ * Out of order waiters triggering random signaling enabling will
+ * be more problematic, but hopefully rare enough and the list
+ * small enough that the O(N) insertion sort is not an issue.
+ */
+
+ list_for_each_entry_reverse(iter, &b->signals, signaling.link)
+ if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
+ break;
+
+ list_add(&request->signaling.link, &iter->signaling.link);
+}
+
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
u32 seqno;
- /* Note that we may be called from an interrupt handler on another
+ /*
+ * Note that we may be called from an interrupt handler on another
* device (e.g. nouveau signaling a fence completion causing us
* to submit a request, and so enable signaling). As such,
* we need to make sure that all other users of b->rb_lock protect
@@ -765,18 +748,17 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
- seqno = i915_gem_request_global_seqno(request);
- if (!seqno)
+ seqno = i915_request_global_seqno(request);
+ if (!seqno) /* will be enabled later upon execution */
return;
- spin_lock(&b->rb_lock);
-
GEM_BUG_ON(request->signaling.wait.seqno);
request->signaling.wait.tsk = b->signaler;
request->signaling.wait.request = request;
request->signaling.wait.seqno = seqno;
- /* First add ourselves into the list of waiters, but register our
+ /*
+ * Add ourselves into the list of waiters, registering our
* bottom-half as the signaller thread. As per usual, only the oldest
* waiter (not just signaller) is tasked as the bottom-half waking
* up all completed waiters after the user interrupt.
@@ -784,58 +766,31 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
* If we are the oldest waiter, enable the irq (after which we
* must double check that the seqno did not complete).
*/
+ spin_lock(&b->rb_lock);
+ insert_signal(b, request, seqno);
wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
-
- if (!__i915_gem_request_completed(request, seqno)) {
- struct rb_node *parent, **p;
- bool first;
-
- /* Now insert ourselves into the retirement ordered list of
- * signals on this engine. We track the oldest seqno as that
- * will be the first signal to complete.
- */
- parent = NULL;
- first = true;
- p = &b->signals.rb_node;
- while (*p) {
- parent = *p;
- if (i915_seqno_passed(seqno,
- to_signaler(parent)->signaling.wait.seqno)) {
- p = &parent->rb_right;
- first = false;
- } else {
- p = &parent->rb_left;
- }
- }
- rb_link_node(&request->signaling.node, parent, p);
- rb_insert_color(&request->signaling.node, &b->signals);
- if (first)
- rcu_assign_pointer(b->first_signal, request);
- } else {
- __intel_engine_remove_wait(engine, &request->signaling.wait);
- request->signaling.wait.seqno = 0;
- wakeup = false;
- }
-
spin_unlock(&b->rb_lock);
if (wakeup)
wake_up_process(b->signaler);
}
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+void intel_engine_cancel_signaling(struct i915_request *request)
{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
- if (READ_ONCE(request->signaling.wait.seqno)) {
- struct intel_engine_cs *engine = request->engine;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ if (!READ_ONCE(request->signaling.wait.seqno))
+ return;
- spin_lock(&b->rb_lock);
- __intel_engine_remove_signal(engine, request);
- spin_unlock(&b->rb_lock);
- }
+ spin_lock(&b->rb_lock);
+ __intel_engine_remove_wait(engine, &request->signaling.wait);
+ if (fetch_and_zero(&request->signaling.wait.seqno))
+ __list_del_entry(&request->signaling.link);
+ spin_unlock(&b->rb_lock);
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
@@ -849,6 +804,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
+ INIT_LIST_HEAD(&b->signals);
+
/* Spawn a thread to provide a common bottom-half for all signals.
* As this is an asynchronous interface we cannot steal the current
* task for handling the bottom-half to the user interrupt, therefore
@@ -908,8 +865,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
/* The engines should be idle and all requests accounted for! */
WARN_ON(READ_ONCE(b->irq_wait));
WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
- WARN_ON(rcu_access_pointer(b->first_signal));
- WARN_ON(!RB_EMPTY_ROOT(&b->signals));
+ WARN_ON(!list_empty(&b->signals));
if (!IS_ERR_OR_NULL(b->signaler))
kthread_stop(b->signaler);
@@ -917,28 +873,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- bool busy = false;
-
- spin_lock_irq(&b->rb_lock);
-
- if (b->irq_wait) {
- wake_up_process(b->irq_wait->tsk);
- busy = true;
- }
-
- if (rcu_access_pointer(b->first_signal)) {
- wake_up_process(b->signaler);
- busy = true;
- }
-
- spin_unlock_irq(&b->rb_lock);
-
- return busy;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index ee788d5be5e3..dc7db8a2caf8 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1778,6 +1778,199 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
+static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
+{
+ int ranges_24[] = { 312000, 552000, 648000 };
+ int ranges_19_38[] = { 307200, 556800, 652800 };
+ int *ranges;
+
+ switch (ref) {
+ default:
+ MISSING_CASE(ref);
+ case 24000:
+ ranges = ranges_24;
+ break;
+ case 19200:
+ case 38400:
+ ranges = ranges_19_38;
+ break;
+ }
+
+ if (min_cdclk > ranges[1])
+ return ranges[2];
+ else if (min_cdclk > ranges[0])
+ return ranges[1];
+ else
+ return ranges[0];
+}
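For reference, a few worked values of the bucketing above, read directly from the two ranges tables; this is a hypothetical self-check rather than driver code:

/*
 * icl_calc_cdclk(     0, 24000) == 312000   (lowest 24 MHz bucket)
 * icl_calc_cdclk(312001, 24000) == 552000   (just above the first step)
 * icl_calc_cdclk(600000, 19200) == 652800   (19.2/38.4 MHz table, top bucket)
 */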
+
+static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ case 307200:
+ case 556800:
+ case 652800:
+ WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
+ dev_priv->cdclk.hw.ref != 38400);
+ break;
+ case 312000:
+ case 552000:
+ case 648000:
+ WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ }
+
+ ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
+
+ return dev_priv->cdclk.hw.ref * ratio;
+}
+
+static void icl_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ unsigned int cdclk = cdclk_state->cdclk;
+ unsigned int vco = cdclk_state->vco;
+ int ret;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->pcu_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
+ return;
+ }
+
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_disable(dev_priv);
+
+ if (dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_enable(dev_priv, vco);
+
+ I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
+ skl_cdclk_decimal(cdclk));
+
+ mutex_lock(&dev_priv->pcu_lock);
+ /* TODO: add proper DVFS support. */
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ intel_update_cdclk(dev_priv);
+}
+
+static void icl_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 val;
+
+ cdclk_state->bypass = 50000;
+
+ val = I915_READ(SKL_DSSM);
+ switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
+ default:
+ MISSING_CASE(val);
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_state->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_state->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_state->ref = 38400;
+ break;
+ }
+
+ val = I915_READ(BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+ * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_state->vco = 0;
+ cdclk_state->cdclk = cdclk_state->bypass;
+ return;
+ }
+
+ cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+
+ val = I915_READ(CDCLK_CTL);
+ WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
+
+ cdclk_state->cdclk = cdclk_state->vco / 2;
+}
+
+/**
+ * icl_init_cdclk - Initialize CDCLK on ICL
+ * @dev_priv: i915 device
+ *
+ * Initialize CDCLK for ICL. This consists mainly of initializing
+ * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
+ * is generally done only during the display core initialization sequence, after
+ * which the DMC will take care of turning CDCLK off/on as needed.
+ */
+void icl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state sanitized_state;
+ u32 val;
+
+ /* This sets dev_priv->cdclk.hw. */
+ intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
+ /* This means CDCLK is disabled. */
+ if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ goto sanitize;
+
+ val = I915_READ(CDCLK_CTL);
+
+ if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
+ goto sanitize;
+
+ if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
+ skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
+ goto sanitize;
+
+ return;
+
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+ sanitized_state.ref = dev_priv->cdclk.hw.ref;
+ sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
+ sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
+ sanitized_state.cdclk);
+
+ icl_set_cdclk(dev_priv, &sanitized_state);
+}
+
+/**
+ * icl_uninit_cdclk - Uninitialize CDCLK on ICL
+ * @dev_priv: i915 device
+ *
+ * Uninitialize CDCLK for ICL. This is done only during the display core
+ * uninitialization sequence.
+ */
+void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+ cdclk_state.cdclk = cdclk_state.bypass;
+ cdclk_state.vco = 0;
+
+ icl_set_cdclk(dev_priv, &cdclk_state);
+}
+
/**
* cnl_init_cdclk - Initialize CDCLK on CNL
* @dev_priv: i915 device
@@ -2216,6 +2409,36 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
return 0;
}
+static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ unsigned int ref = intel_state->cdclk.logical.ref;
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ cdclk = icl_calc_cdclk(min_cdclk, ref);
+ vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+ intel_state->cdclk.logical.vco = vco;
+ intel_state->cdclk.logical.cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ cdclk = icl_calc_cdclk(0, ref);
+ vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+ intel_state->cdclk.actual.vco = vco;
+ intel_state->cdclk.actual.cdclk = cdclk;
+ } else {
+ intel_state->cdclk.actual = intel_state->cdclk.logical;
+ }
+
+ return 0;
+}
+
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
@@ -2233,7 +2456,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
- else if (INTEL_INFO(dev_priv)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
return 2*max_cdclk_freq*90/100;
else
return max_cdclk_freq*90/100;
@@ -2249,7 +2472,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (dev_priv->cdclk.hw.ref == 24000)
+ dev_priv->max_cdclk_freq = 648000;
+ else
+ dev_priv->max_cdclk_freq = 652800;
+ } else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
} else if (IS_GEN9_BC(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -2473,9 +2701,14 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
cnl_modeset_calc_cdclk;
+ } else if (IS_ICELAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = icl_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
}
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ dev_priv->display.get_cdclk = icl_get_cdclk;
+ else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
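
One consequence of the bucket tables shows up in the intel_update_max_cdclk hunk above: the highest reachable cdclk itself depends on the reference clock (648000 with a 24 MHz reference, 652800 with 19.2/38.4 MHz). A tiny sketch of that mapping, with the two values taken from the IS_ICELAKE branch above and nothing else modeled:

/* Sketch: ICL max cdclk by reference clock, as in intel_update_max_cdclk. */
#include <stdio.h>

static int icl_max_cdclk(unsigned int ref_khz)
{
	return ref_khz == 24000 ? 648000 : 652800;
}

int main(void)
{
	printf("24MHz ref: %d kHz, 19.2MHz ref: %d kHz\n",
	       icl_max_cdclk(24000), icl_max_cdclk(19200));
	return 0;
}
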
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 768f1c26080e..c6a7beabd58d 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -66,13 +66,13 @@
* of the CTM coefficient and we write the value from bit 3. We also round the
* value.
*/
-#define I9XX_CSC_COEFF_FP(coeff, fbits) \
+#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
-#define I9XX_CSC_COEFF_LIMITED_RANGE \
- I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define I9XX_CSC_COEFF_1_0 \
- ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE \
+ ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
+#define ILK_CSC_COEFF_1_0 \
+ ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{
@@ -84,30 +84,31 @@ static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
/*
* When using limited range, multiply the matrix given by userspace by
- * the matrix that we would use for the limited range. We do the
- * multiplication in U2.30 format.
+ * the matrix that we would use for the limited range.
*/
-static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
{
int i;
- for (i = 0; i < 9; i++)
- result[i] = 0;
+ for (i = 0; i < 9; i++) {
+ u64 user_coeff = input[i];
+ u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
+ u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
+ CTM_COEFF_4_0 - 1) >> 2;
- for (i = 0; i < 3; i++) {
- int64_t user_coeff = input[i * 3 + i];
- uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
- uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
- 0,
- CTM_COEFF_4_0 - 1) >> 2;
-
- result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
- if (CTM_COEFF_NEGATIVE(user_coeff))
- result[i * 3 + i] |= CTM_COEFF_SIGN;
+ /*
+ * By scaling every coefficient by the ratio of limited range
+ * (16-235) to full range (0-255), the final output is compressed
+ * to fit in the limited range supported by the panel.
+ */
+ result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
+ result[i] |= user_coeff & CTM_COEFF_SIGN;
}
+
+ return result;
}
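
The multiply above is plain S31.32 fixed-point arithmetic: the user coefficient is clamped below 4.0 and pre-shifted right by 2 so both operands fit in 32 bits, and the 64-bit product shifted right by 30 lands back on the S31.32 scale. A standalone check of that math, where COEFF_1_0 and the 219/255 limited-range factor are local stand-ins for CTM_COEFF_1_0 and CTM_COEFF_LIMITED_RANGE:

/* Sketch of the S31.32 multiply performed by ctm_mult_by_limited(). */
#include <stdint.h>
#include <stdio.h>

#define COEFF_1_0           (1ULL << 32)                 /* 1.0 in S31.32 */
#define COEFF_LIMITED_RANGE ((235 - 16) * COEFF_1_0 / 255)

int main(void)
{
	uint64_t user = COEFF_1_0;                /* identity matrix entry */
	uint32_t limited = (uint32_t)COEFF_LIMITED_RANGE;
	uint32_t abs30 = (uint32_t)(user >> 2);   /* S31.32 -> S31.30 */
	uint64_t out = ((uint64_t)limited * abs30) >> 30;

	/* 1.0 * 219/255 ~= 0.8588, back on the S31.32 scale */
	printf("out=%llu (%.4f)\n",
	       (unsigned long long)out, (double)out / (double)COEFF_1_0);
	return 0;
}
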
-static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
{
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
@@ -131,8 +132,7 @@ static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
}
-/* Set up the pipe CSC unit. */
-static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -140,20 +140,27 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
+ bool limited_color_range = false;
+
+ /*
+ * FIXME if there's a gamma LUT after the CSC, we should
+ * do the range compression using the gamma LUT instead.
+ */
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ limited_color_range = intel_crtc_state->limited_color_range;
if (intel_crtc_state->ycbcr420) {
- i9xx_load_ycbcr_conversion_matrix(intel_crtc);
+ ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {
struct drm_color_ctm *ctm = crtc_state->ctm->data;
- uint64_t input[9] = { 0, };
+ const u64 *input;
+ u64 temp[9];
- if (intel_crtc_state->limited_color_range) {
- ctm_mult_by_limited(input, ctm->matrix);
- } else {
- for (i = 0; i < ARRAY_SIZE(input); i++)
- input[i] = ctm->matrix[i];
- }
+ if (limited_color_range)
+ input = ctm_mult_by_limited(temp, ctm->matrix);
+ else
+ input = ctm->matrix;
/*
* Convert fixed point S31.32 input to format supported by the
@@ -174,21 +181,21 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (abs_coeff < CTM_COEFF_0_125)
coeffs[i] |= (3 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 12);
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
else if (abs_coeff < CTM_COEFF_0_25)
coeffs[i] |= (2 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 11);
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
else if (abs_coeff < CTM_COEFF_0_5)
coeffs[i] |= (1 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 10);
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
else if (abs_coeff < CTM_COEFF_1_0)
- coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
else if (abs_coeff < CTM_COEFF_2_0)
coeffs[i] |= (7 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 8);
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
else
coeffs[i] |= (6 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 7);
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
}
} else {
/*
@@ -200,11 +207,11 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
* into consideration.
*/
for (i = 0; i < 3; i++) {
- if (intel_crtc_state->limited_color_range)
+ if (limited_color_range)
coeffs[i * 3 + i] =
- I9XX_CSC_COEFF_LIMITED_RANGE;
+ ILK_CSC_COEFF_LIMITED_RANGE;
else
- coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
+ coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
}
}
@@ -224,7 +231,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
- if (intel_crtc_state->limited_color_range)
+ if (limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -235,7 +242,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (intel_crtc_state->limited_color_range)
+ if (limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -645,14 +652,14 @@ void intel_color_init(struct drm_crtc *crtc)
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
IS_BROXTON(dev_priv)) {
- dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts;
} else {
dev_priv->display.load_luts = i9xx_load_luts;
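
For context on the renamed ILK_CSC_COEFF_FP ladder: each coefficient ends up as an exponent code in the top bits of a 16-bit register field plus a mantissa rounded to 9 bits, with smaller magnitudes getting more fractional bits. A hedged standalone sketch of that packing, reusing only the exponent codes, fbits values and the round-and-mask rule visible in the hunk; register layout beyond the 16-bit value is not modeled:

/* Sketch of the coefficient packing done by ilk_load_csc_matrix(). */
#include <stdint.h>
#include <stdio.h>

#define COEFF_1_0 (1ULL << 32)           /* S31.32, as in the CTM code */

static uint16_t csc_coeff_fp(uint64_t coeff, int fbits)
{
	uint64_t v = (coeff >> (32 - fbits - 3)) + 4;   /* round */

	if (v > 0xfff)
		v = 0xfff;                              /* clamp */
	return (uint16_t)(v & 0xff8);                   /* keep 9 mantissa bits */
}

static uint16_t csc_pack(uint64_t abs_coeff)
{
	if (abs_coeff < COEFF_1_0 / 8)                  /* < 0.125 */
		return (3 << 12) | csc_coeff_fp(abs_coeff, 12);
	else if (abs_coeff < COEFF_1_0 / 4)             /* < 0.25 */
		return (2 << 12) | csc_coeff_fp(abs_coeff, 11);
	else if (abs_coeff < COEFF_1_0 / 2)             /* < 0.5 */
		return (1 << 12) | csc_coeff_fp(abs_coeff, 10);
	else if (abs_coeff < COEFF_1_0)                 /* < 1.0 */
		return csc_coeff_fp(abs_coeff, 9);
	else if (abs_coeff < 2 * COEFF_1_0)             /* < 2.0 */
		return (7 << 12) | csc_coeff_fp(abs_coeff, 8);
	else
		return (6 << 12) | csc_coeff_fp(abs_coeff, 7);
}

int main(void)
{
	printf("1.0  -> 0x%04x\n", csc_pack(COEFF_1_0));         /* 0x7800 */
	printf("0.75 -> 0x%04x\n", csc_pack(3 * COEFF_1_0 / 4)); /* 0x0c00 */
	return 0;
}
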
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 1cd4a7c22bd5..c0a8805b277f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -474,14 +474,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
- *
- * Not for i915G/i915GM
- *
- * \return true if CRT is connected.
- * \return false if CRT is disconnected.
- */
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -807,10 +799,11 @@ intel_crt_detect(struct drm_connector *connector,
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, ctx);
- } else if (ret == 0)
+ } else if (ret == 0) {
status = connector_status_unknown;
- else if (ret < 0)
+ } else {
status = ret;
+ }
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
@@ -963,8 +956,10 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
if (I915_HAS_HOTPLUG(dev_priv) &&
- !dmi_check_system(intel_spurious_crt_detect))
+ !dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
+ crt->base.hotplug = intel_encoder_hotplug;
+ }
if (HAS_DDI(dev_priv)) {
crt->base.port = PORT_E;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cfcd9cb37d5d..ac8fc2a44ac6 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -25,6 +25,7 @@
*
*/
+#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
@@ -2152,7 +2153,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
I915_WRITE(DPLL_CTRL2, val);
- } else if (INTEL_INFO(dev_priv)->gen < 9) {
+ } else if (INTEL_GEN(dev_priv) < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
@@ -2507,6 +2508,8 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp->link_trained = false;
+
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
@@ -2798,6 +2801,150 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
+static int modeset_pipe(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto out;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto out;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto out;
+
+ return 0;
+
+ out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+static int intel_hdmi_reset_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ u8 config;
+ int ret;
+
+ if (!connector || connector->base.status != connector_status_connected)
+ return 0;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+
+ if (!crtc_state->base.active)
+ return 0;
+
+ if (!crtc_state->hdmi_high_tmds_clock_ratio &&
+ !crtc_state->hdmi_scrambling)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+ return 0;
+ }
+
+ if (!!(config & SCDC_TMDS_BIT_CLOCK_RATIO_BY_40) ==
+ crtc_state->hdmi_high_tmds_clock_ratio &&
+ !!(config & SCDC_SCRAMBLING_ENABLE) ==
+ crtc_state->hdmi_scrambling)
+ return 0;
+
+ /*
+ * HDMI 2.0 says that one should not send scrambled data
+ * prior to configuring the sink scrambling, and that
+ * TMDS clock/data transmission should be suspended when
+ * changing the TMDS clock rate in the sink. So let's
+ * just do a full modeset here, even though some sinks
+ * would be perfectly happy if we were to just reconfigure
+ * the SCDC settings on the fly.
+ */
+ return modeset_pipe(&crtc->base, ctx);
+}
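
Stripped of the locking and early returns, intel_hdmi_reset_link boils down to: read the sink's SCDC TMDS config byte and force a full modeset only if the scrambling or clock-ratio bits disagree with the CRTC state we programmed. A small sketch of just that comparison; the function name and the choice to pass the bit masks in as parameters (rather than pull them from drm_scdc_helper.h) are made up to keep the snippet standalone:

/* Sketch of the SCDC mismatch test made by intel_hdmi_reset_link(). */
#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true when the sink's TMDS config no longer matches what we
 * programmed, i.e. when a full modeset is needed to resynchronize.
 */
bool hdmi_scdc_needs_reset(uint8_t config,
			   uint8_t clock_ratio_bit, uint8_t scrambling_bit,
			   bool want_high_tmds_clock_ratio, bool want_scrambling)
{
	bool sink_ratio = config & clock_ratio_bit;
	bool sink_scrambling = config & scrambling_bit;

	return sink_ratio != want_high_tmds_clock_ratio ||
	       sink_scrambling != want_scrambling;
}
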
+
+static bool intel_ddi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ bool changed;
+ int ret;
+
+ changed = intel_encoder_hotplug(encoder, connector);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ for (;;) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA)
+ ret = intel_hdmi_reset_link(encoder, &ctx);
+ else
+ ret = intel_dp_retrain_link(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+
+ return changed;
+}
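
The loop in intel_ddi_hotplug is the standard drm_modeset_acquire pattern: if taking a lock reports -EDEADLK, back off (drop everything already held) and retry the whole operation. A reduced, hypothetical illustration of that control flow; acquire_ctx, ctx_backoff and try_reset_link are invented stand-ins, not DRM APIs:

/* Sketch of the -EDEADLK backoff/retry shape used by intel_ddi_hotplug(). */
#include <errno.h>
#include <stdio.h>

struct acquire_ctx { int backoffs; };  /* stand-in for drm_modeset_acquire_ctx */

static void ctx_backoff(struct acquire_ctx *ctx)
{
	/* The real helper drops contended locks and waits for the holder. */
	ctx->backoffs++;
}

/* Pretend lock acquisition deadlocks twice before succeeding. */
static int try_reset_link(struct acquire_ctx *ctx)
{
	return ctx->backoffs < 2 ? -EDEADLK : 0;
}

int main(void)
{
	struct acquire_ctx ctx = { 0 };
	int ret;

	for (;;) {
		ret = try_reset_link(&ctx);
		if (ret == -EDEADLK) {
			ctx_backoff(&ctx);
			continue;
		}
		break;
	}

	printf("done after %d backoff(s), ret=%d\n", ctx.backoffs, ret);
	return 0;
}
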
+
static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
@@ -2842,39 +2989,45 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
return false;
}
+static int
+intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
+ enum port port = intel_dport->base.port;
+ int max_lanes = 4;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return max_lanes;
+
+ if (port == PORT_A || port == PORT_E) {
+ if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ max_lanes = port == PORT_A ? 4 : 0;
+ else
+ /* Both A and E share 2 lanes */
+ max_lanes = 2;
+ }
+
+ /*
+ * Some BIOS might fail to set this bit on port A if eDP
+ * wasn't lit up at boot. Force this bit set when needed
+ * so we use the proper lane count for our calculations.
+ */
+ if (intel_ddi_a_force_4_lanes(intel_dport)) {
+ DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+ intel_dport->saved_port_bits |= DDI_A_4_LANES;
+ max_lanes = 4;
+ }
+
+ return max_lanes;
+}
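
intel_ddi_max_lanes condenses the two old switch statements into one rule: on gen11+ every DDI gets 4 lanes; otherwise ports A and E share a 4-lane bundle, and DDI_A_4_LANES decides whether A owns all 4 (leaving E with none) or both settle for 2. A table-style sketch of that decision; the port letters, bools and main() are illustrative inputs only:

/* Sketch of the DDI A/E lane-sharing rule from intel_ddi_max_lanes(). */
#include <stdbool.h>
#include <stdio.h>

static int ddi_max_lanes(char port, bool gen11_plus, bool ddi_a_4_lanes)
{
	if (gen11_plus)
		return 4;

	if (port == 'A' || port == 'E') {
		if (ddi_a_4_lanes)
			return port == 'A' ? 4 : 0;
		return 2;                /* A and E split the bundle */
	}

	return 4;                        /* other ports always have 4 lanes */
}

int main(void)
{
	const char ports[] = { 'A', 'B', 'E' };

	for (int i = 0; i < 3; i++)
		printf("port %c: %d lanes (A_4_LANES=1), %d lanes (A_4_LANES=0)\n",
		       ports[i],
		       ddi_max_lanes(ports[i], false, true),
		       ddi_max_lanes(ports[i], false, false));
	return 0;
}
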
+
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
- int max_lanes;
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
- switch (port) {
- case PORT_A:
- max_lanes = 4;
- break;
- case PORT_E:
- max_lanes = 0;
- break;
- default:
- max_lanes = 4;
- break;
- }
- } else {
- switch (port) {
- case PORT_A:
- max_lanes = 2;
- break;
- case PORT_E:
- max_lanes = 2;
- break;
- default:
- max_lanes = 4;
- break;
- }
- }
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@@ -2908,6 +3061,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
+ intel_encoder->hotplug = intel_ddi_hotplug;
intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
@@ -2920,10 +3074,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
+ intel_encoder->type = INTEL_OUTPUT_DDI;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->port = port;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = 0;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
+ intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
+ intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
switch (port) {
case PORT_A:
@@ -2954,26 +3115,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
MISSING_CASE(port);
}
- /*
- * Some BIOS might fail to set this bit on port A if eDP
- * wasn't lit up at boot. Force this bit set when needed
- * so we use the proper lane count for our calculations.
- */
- if (intel_ddi_a_force_4_lanes(intel_dig_port)) {
- DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
- intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
- max_lanes = 4;
- }
-
- intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
- intel_dig_port->max_lanes = max_lanes;
-
- intel_encoder->type = INTEL_OUTPUT_DDI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = 0;
-
intel_infoframe_init(intel_dig_port);
if (init_dp) {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a2c16140169f..3dd350f7b8e6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -81,12 +81,16 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
+ int s;
+
drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
- drm_printf(p, "subslice mask %04x\n", sseu->subslice_mask);
- drm_printf(p, "subslice per slice: %u\n",
- hweight8(sseu->subslice_mask));
+ for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) {
+ drm_printf(p, "slice%d %u subslices mask=%04x\n",
+ s, hweight8(sseu->subslice_mask[s]),
+ sseu->subslice_mask[s]);
+ }
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
drm_printf(p, "has slice power gating: %s\n",
@@ -120,22 +124,100 @@ void intel_device_info_dump(const struct intel_device_info *info,
intel_device_info_dump_flags(info, p);
}
+void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ int s, ss;
+
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ return;
+ }
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
+ s, hweight8(sseu->subslice_mask[s]),
+ sseu->subslice_mask[s]);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+ drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+ ss, hweight16(enabled_eus), enabled_eus);
+ }
+ }
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+ u16 i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
+ total += hweight8(sseu->eu_mask[i]);
+
+ return total;
+}
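
compute_eu_total simply popcounts the whole eu_mask array, so the EU total always agrees with whatever the per-subslice masks were set to by the per-platform init functions. A quick standalone equivalent; the array contents in main() are made-up example data and __builtin_popcount stands in for hweight8:

/* Sketch of compute_eu_total(): the EU count is a popcount over eu_mask. */
#include <stdint.h>
#include <stdio.h>

static unsigned int eu_total(const uint8_t *eu_mask, unsigned int len)
{
	unsigned int i, total = 0;

	for (i = 0; i < len; i++)
		total += __builtin_popcount(eu_mask[i]);
	return total;
}

int main(void)
{
	/* Two subslices with 8 EUs each, one with 6 -> 22 EUs total. */
	const uint8_t mask[] = { 0xff, 0xff, 0x3f };

	printf("%u\n", eu_total(mask, sizeof(mask)));
	return 0;
}
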
+
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
const u32 fuse2 = I915_READ(GEN8_FUSE2);
+ int s, ss;
+ const int eu_mask = 0xff;
+ u32 subslice_mask, eu_en;
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
- sseu->subslice_mask = (1 << 4) - 1;
- sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
+ sseu->max_slices = 6;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
+
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ sseu->subslice_mask[0] = subslice_mask;
+ for (s = 1; s < sseu->max_slices; s++)
+ sseu->subslice_mask[s] = subslice_mask & 0x3;
+
+ /* Slice0 */
+ eu_en = ~I915_READ(GEN8_EU_DISABLE0);
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+ /* Slice1 */
+ sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~I915_READ(GEN8_EU_DISABLE1);
+ sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+ /* Slice2 */
+ sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+ /* Slice3 */
+ sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~I915_READ(GEN8_EU_DISABLE2);
+ sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+ /* Slice4 */
+ sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+ /* Slice5 */
+ sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~I915_READ(GEN10_EU_DISABLE3);
+ sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+
+ /* Do a second pass where we mark the subslices disabled if all their
+ * eus are off.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ if (sseu_get_eus(sseu, s, ss) == 0)
+ sseu->subslice_mask[s] &= ~BIT(ss);
+ }
+ }
- sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0));
- sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1));
- sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2));
- sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) &
- GEN10_EU_DIS_SS_MASK));
+ sseu->eu_total = compute_eu_total(sseu);
/*
* CNL is expected to always have a uniform distribution
@@ -156,26 +238,39 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
- u32 fuse, eu_dis;
+ u32 fuse;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
+ sseu->max_slices = 1;
+ sseu->max_subslices = 2;
+ sseu->max_eus_per_subslice = 8;
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- sseu->subslice_mask |= BIT(0);
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK);
- sseu->eu_total += 8 - hweight32(eu_dis);
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+ sseu->subslice_mask[0] |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- sseu->subslice_mask |= BIT(1);
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- sseu->eu_total += 8 - hweight32(eu_dis);
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+ sseu->subslice_mask[0] |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
+ sseu->eu_total = compute_eu_total(sseu);
+
/*
* CHV expected to always have a uniform distribution of EU
* across subslices.
@@ -197,41 +292,52 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
struct sseu_dev_info *sseu = &info->sseu;
- int s_max = 3, ss_max = 4, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable;
- u8 eu_mask = 0xff;
+ u32 fuse2, eu_disable, subslice_mask;
+ const u8 eu_mask = 0xff;
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ /* BXT has a single slice and at most 3 subslices. */
+ sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
+ sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
+ sseu->max_eus_per_subslice = 8;
+
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
- sseu->subslice_mask = (1 << ss_max) - 1;
- sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT);
+ subslice_mask = (1 << sseu->max_subslices) - 1;
+ subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT);
/*
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < sseu->max_slices; s++) {
if (!(sseu->slice_mask & BIT(s)))
/* skip disabled slice */
continue;
+ sseu->subslice_mask[s] = subslice_mask;
+
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
- for (ss = 0; ss < ss_max; ss++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
+ u8 eu_disabled_mask;
- if (!(sseu->subslice_mask & BIT(ss)))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
- eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
- eu_mask);
+ eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ eu_per_ss = sseu->max_eus_per_subslice -
+ hweight8(eu_disabled_mask);
/*
* Record which subslice(s) has(have) 7 EUs. we
@@ -240,11 +346,11 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
*/
if (eu_per_ss == 7)
sseu->subslice_7eu[s] |= BIT(ss);
-
- sseu->eu_total += eu_per_ss;
}
}
+ sseu->eu_total = compute_eu_total(sseu);
+
/*
* SKL is expected to always have a uniform distribution
* of EU across subslices with the exception that any one
@@ -270,8 +376,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(dev_priv)) {
-#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
@@ -289,19 +395,22 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
- const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable[3]; /* s_max */
+ u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ sseu->max_slices = 3;
+ sseu->max_subslices = 3;
+ sseu->max_eus_per_subslice = 8;
+
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
- sseu->subslice_mask = GENMASK(ss_max - 1, 0);
- sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
- GEN8_F2_SS_DIS_SHIFT);
+ subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+ subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+ GEN8_F2_SS_DIS_SHIFT);
eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -315,30 +424,38 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < sseu->max_slices; s++) {
if (!(sseu->slice_mask & BIT(s)))
/* skip disabled slice */
continue;
- for (ss = 0; ss < ss_max; ss++) {
+ sseu->subslice_mask[s] = subslice_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask & BIT(ss)))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
- n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+ eu_disabled_mask =
+ eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ n_disabled = hweight8(eu_disabled_mask);
/*
* Record which subslices have 7 EUs.
*/
- if (eu_max - n_disabled == 7)
+ if (sseu->max_eus_per_subslice - n_disabled == 7)
sseu->subslice_7eu[s] |= 1 << ss;
-
- sseu->eu_total += eu_max - n_disabled;
}
}
+ sseu->eu_total = compute_eu_total(sseu);
+
/*
* BDW is expected to always have a uniform distribution of EU across
* subslices with the exception that any one EU in any one subslice may
@@ -357,6 +474,72 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
+static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct sseu_dev_info *sseu = &info->sseu;
+ u32 fuse1;
+ int s, ss;
+
+ /*
+ * There isn't a register to tell us how many slices/subslices. We
+ * work off the PCI-ids here.
+ */
+ switch (info->gt) {
+ default:
+ MISSING_CASE(info->gt);
+ /* fall through */
+ case 1:
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask[0] = BIT(0);
+ break;
+ case 2:
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ break;
+ case 3:
+ sseu->slice_mask = BIT(0) | BIT(1);
+ sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ sseu->subslice_mask[1] = BIT(0) | BIT(1);
+ break;
+ }
+
+ sseu->max_slices = hweight8(sseu->slice_mask);
+ sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
+
+ fuse1 = I915_READ(HSW_PAVP_FUSE1);
+ switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+ default:
+ MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+ HSW_F1_EU_DIS_SHIFT);
+ /* fall through */
+ case HSW_F1_EU_DIS_10EUS:
+ sseu->eu_per_subslice = 10;
+ break;
+ case HSW_F1_EU_DIS_8EUS:
+ sseu->eu_per_subslice = 8;
+ break;
+ case HSW_F1_EU_DIS_6EUS:
+ sseu->eu_per_subslice = 6;
+ break;
+ }
+ sseu->max_eus_per_subslice = sseu->eu_per_subslice;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ sseu_set_eus(sseu, s, ss,
+ (1UL << sseu->eu_per_subslice) - 1);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /* No powergating for you. */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
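
Since Haswell has no topology fuse, the function above fixes the slice/subslice layout from the GT level and only reads HSW_PAVP_FUSE1 for the per-subslice EU count, then lets compute_eu_total() do the rest. A worked sketch of the resulting totals; hsw_eu_total and its parameters are invented for the example, only the GT mapping and the 10/8/6 EU options come from the hunk:

/* Sketch: HSW EU totals as implied by haswell_sseu_info_init(). */
#include <stdio.h>

static int hsw_eu_total(int gt, int eu_per_subslice)
{
	int slices = (gt == 3) ? 2 : 1;
	int subslices = (gt == 1) ? 1 : 2;

	return slices * subslices * eu_per_subslice;
}

int main(void)
{
	/* With the "10 EUs per subslice" fuse setting: */
	printf("GT1: %d  GT2: %d  GT3: %d\n",
	       hsw_eu_total(1, 10), hsw_eu_total(2, 10), hsw_eu_total(3, 10));
	return 0;
}
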
+
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
@@ -489,6 +672,9 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
info->num_scalers[PIPE_C] = 1;
}
+ BUILD_BUG_ON(I915_NUM_ENGINES >
+ sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -574,7 +760,9 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
}
/* Initialize slice/subslice/EU info */
- if (IS_CHERRYVIEW(dev_priv))
+ if (IS_HASWELL(dev_priv))
+ haswell_sseu_info_init(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
@@ -586,3 +774,9 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
+
+void intel_driver_caps_print(const struct intel_driver_caps *caps,
+ struct drm_printer *p)
+{
+ drm_printf(p, "scheduler: %x\n", caps->scheduler);
+}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 9542018d11d0..0835752c8b22 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -96,6 +96,7 @@ enum intel_platform {
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
+ func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
func(has_overlay); \
func(has_pooled_eu); \
@@ -112,10 +113,13 @@ enum intel_platform {
func(supports_tv); \
func(has_ipc);
+#define GEN_MAX_SLICES (6) /* CNL upper bound */
+#define GEN_MAX_SUBSLICES (7)
+
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask;
- u8 eu_total;
+ u8 subslice_mask[GEN_MAX_SUBSLICES];
+ u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
@@ -123,8 +127,21 @@ struct sseu_dev_info {
u8 has_slice_pg:1;
u8 has_subslice_pg:1;
u8 has_eu_pg:1;
+
+ /* Topology fields */
+ u8 max_slices;
+ u8 max_subslices;
+ u8 max_eus_per_subslice;
+
+ /* We don't have more than 8 EUs per subslice at the moment, and since
+ * enabled EUs are stored as a bitmask, one byte per subslice is enough;
+ * there is no need to scale the array by the EU count.
+ */
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};
+typedef u8 intel_ring_mask_t;
+
struct intel_device_info {
u16 device_id;
u16 gen_mask;
@@ -132,19 +149,19 @@ struct intel_device_info {
u8 gen;
u8 gt; /* GT number, 0 if undefined */
u8 num_rings;
- u8 ring_mask; /* Rings supported by the HW */
+ intel_ring_mask_t ring_mask; /* Rings supported by the HW */
enum intel_platform platform;
u32 platform_mask;
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
u32 display_mmio_offset;
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- unsigned int page_sizes; /* page sizes supported by the HW */
-
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
@@ -167,9 +184,55 @@ struct intel_device_info {
} color;
};
+struct intel_driver_caps {
+ unsigned int scheduler;
+};
+
static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
- return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+ unsigned int i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
+ total += hweight8(sseu->subslice_mask[i]);
+
+ return total;
+}
+
+static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
+ int slice, int subslice)
+{
+ int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
+ BITS_PER_BYTE);
+ int slice_stride = sseu->max_subslices * subslice_stride;
+
+ return slice * slice_stride + subslice * subslice_stride;
+}
+
+static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
+ int slice, int subslice)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0;
+ i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
+ eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
+ (i * BITS_PER_BYTE);
+ }
+
+ return eu_mask;
+}
+
+static inline void sseu_set_eus(struct sseu_dev_info *sseu,
+ int slice, int subslice, u16 eu_mask)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0;
+ i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
}
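
The two inline helpers above flatten (slice, subslice) into a byte offset: each subslice occupies DIV_ROUND_UP(max_eus_per_subslice, 8) bytes and each slice max_subslices of those. A standalone round-trip of that packing; struct sseu below is a cut-down local copy of sseu_dev_info made just for the example, with the GEN_MAX_* sizes taken from the header above:

/* Sketch of the eu_mask indexing behind sseu_get_eus()/sseu_set_eus(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_SLICES	6
#define MAX_SUBSLICES	7
#define BITS_PER_BYTE	8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct sseu {
	uint8_t max_subslices;
	uint8_t max_eus_per_subslice;
	uint8_t eu_mask[MAX_SLICES * MAX_SUBSLICES];
};

static int eu_idx(const struct sseu *s, int slice, int subslice)
{
	int subslice_stride = DIV_ROUND_UP(s->max_eus_per_subslice,
					   BITS_PER_BYTE);

	return slice * s->max_subslices * subslice_stride +
	       subslice * subslice_stride;
}

int main(void)
{
	struct sseu s = { .max_subslices = 4, .max_eus_per_subslice = 8 };

	/* "Enable" 6 EUs in slice 2, subslice 1, then count them back. */
	s.eu_mask[eu_idx(&s, 2, 1)] = 0x3f;
	printf("offset=%d eus=%d\n", eu_idx(&s, 2, 1),
	       __builtin_popcount(s.eu_mask[eu_idx(&s, 2, 1)]));
	return 0;
}
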
const char *intel_platform_name(enum intel_platform platform);
@@ -181,5 +244,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
void intel_device_info_dump_runtime(const struct intel_device_info *info,
struct drm_printer *p);
+void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
+
+void intel_driver_caps_print(const struct intel_driver_caps *caps,
+ struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da8e2d4d84b5..3b48fd2561fe 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -558,11 +558,11 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
-/**
+
+/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
@@ -2029,12 +2029,12 @@ static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_pr
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
- else if (INTEL_INFO(dev_priv)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
return 4 * 1024;
else
return 0;
@@ -2067,14 +2067,26 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
}
}
+static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
+}
+
struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation,
+ bool uses_fence,
+ unsigned long *out_flags)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
+ unsigned int pinctl;
u32 alignment;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -2102,11 +2114,26 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+ pinctl = 0;
+
+ /* Valleyview is definitely limited to scanning out the first
+ * 512MiB. Let's presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ pinctl |= PIN_MAPPABLE;
+
+ vma = i915_gem_object_pin_to_display_plane(obj,
+ alignment, &view, pinctl);
if (IS_ERR(vma))
goto err;
- if (i915_vma_is_map_and_fenceable(vma)) {
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+ int ret;
+
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always, when
@@ -2123,7 +2150,15 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
* something and try to run the system in a "less than optimal"
* mode that matches the user configuration.
*/
- i915_vma_pin_fence(vma);
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+ i915_gem_object_unpin_from_display_plane(vma);
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ if (ret == 0 && vma->fence)
+ *out_flags |= PLANE_HAS_FENCE;
}
i915_vma_get(vma);
@@ -2134,11 +2169,12 @@ err:
return vma;
}
-void intel_unpin_fb_vma(struct i915_vma *vma)
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- i915_vma_unpin_fence(vma);
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
@@ -2808,7 +2844,10 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
valid_fb:
mutex_lock(&dev->struct_mutex);
intel_state->vma =
- intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+ intel_pin_and_fence_fb_obj(fb,
+ primary->state->rotation,
+ intel_plane_uses_fence(intel_state),
+ &intel_state->flags);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
@@ -3155,7 +3194,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(dev_priv) < 5)
dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
switch (fb->format->format) {
@@ -4764,8 +4803,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
/**
* skl_update_scaler_plane - Stages update to scaler state for a given plane.
- *
- * @state: crtc's scaler state
+ * @crtc_state: crtc's scaler state
* @plane_state: atomic plane state to update
*
* Return
@@ -4962,6 +5000,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
/**
* intel_post_enable_primary - Perform operations after enabling primary plane
* @crtc: the CRTC whose primary plane was just enabled
+ * @new_crtc_state: the enabling state
*
* Performs potentially sleeping operations that must be done after the primary
* plane is enabled, such as updating FBC and IPS. Note that this may be
@@ -5426,6 +5465,20 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
}
+static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
+
+ val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
+
+ /* Program B credit equally to all pipes */
+ val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+
+ I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5503,6 +5556,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_pipe_mbus_enable(intel_crtc);
+
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
intel_enable_pipe(pipe_config);
@@ -6315,7 +6371,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
- return INTEL_INFO(dev_priv)->gen < 4 &&
+ return INTEL_GEN(dev_priv) < 4 &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
@@ -8202,7 +8258,7 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *config = intel_crtc->config;
- if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 val = 0;
switch (intel_crtc->config->pipe_bpp) {
@@ -9539,7 +9595,8 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
@@ -10704,6 +10761,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
unsigned int used_mst_ports = 0;
+ bool ret = true;
/*
* Walk the connector list instead of the encoder
@@ -10738,7 +10796,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
/* the same port mustn't appear more than once */
if (used_ports & port_mask)
- return false;
+ ret = false;
used_ports |= port_mask;
break;
@@ -10756,7 +10814,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
if (used_ports & used_mst_ports)
return false;
- return true;
+ return ret;
}
static void
@@ -11988,6 +12046,14 @@ static int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
+ /* Catch I915_MODE_FLAG_INHERITED */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ crtc_state, i) {
+ if (crtc_state->mode.private_flags !=
+ old_crtc_state->mode.private_flags)
+ crtc_state->mode_changed = true;
+ }
+
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
@@ -11996,10 +12062,6 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- /* Catch I915_MODE_FLAG_INHERITED */
- if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
- crtc_state->mode_changed = true;
-
if (!needs_modeset(crtc_state))
continue;
@@ -12008,13 +12070,6 @@ static int intel_atomic_check(struct drm_device *dev,
continue;
}
- /* FIXME: For only active_changed we shouldn't need to do any
- * state recomputation at all. */
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ret;
-
ret = intel_modeset_pipe_config(crtc, pipe_config);
if (ret) {
intel_dump_pipe_config(to_intel_crtc(crtc),
@@ -12033,10 +12088,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (needs_modeset(crtc_state))
any_ms = true;
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
needs_modeset(crtc_state) ?
"[modeset]" : "[fastset]");
@@ -12070,7 +12121,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
if (!dev->max_vblank_count)
- return drm_crtc_accurate_vblank_count(&crtc->base);
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
return dev->driver->get_vblank_counter(dev, crtc->pipe);
}
@@ -12554,23 +12605,23 @@ struct wait_rps_boost {
struct wait_queue_entry wait;
struct drm_crtc *crtc;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
};
static int do_rps_boost(struct wait_queue_entry *_wait,
unsigned mode, int sync, void *key)
{
struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct drm_i915_gem_request *rq = wait->request;
+ struct i915_request *rq = wait->request;
/*
* If we missed the vblank, but the request is already running it
* is reasonable to assume that it will complete before the next
* vblank without our intervention, so leave RPS alone.
*/
- if (!i915_gem_request_started(rq))
+ if (!i915_request_started(rq))
gen6_rps_boost(rq, NULL);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -12608,10 +12659,46 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
+static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct i915_vma *vma;
+
+ if (plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ const int align = intel_cursor_alignment(dev_priv);
+
+ return i915_gem_object_attach_phys(obj, align);
+ }
+
+ vma = intel_pin_and_fence_fb_obj(fb,
+ plane_state->base.rotation,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->vma = vma;
+
+ return 0;
+}
+
+static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&old_plane_state->vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+}
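
intel_plane_unpin_fb uses i915's fetch_and_zero() to pull the vma pointer out of the plane state and clear it in one step, so a second unpin call just sees NULL, and the flags recorded at pin time (PLANE_HAS_FENCE) tell it exactly what to undo. A hedged sketch of that take-and-clear idiom; take_and_zero below is modeled on fetch_and_zero but written locally, and the tiny vma struct is purely illustrative:

/* Sketch of the take-and-clear pattern used by intel_plane_unpin_fb(). */
#include <stdio.h>

/* Modeled on i915's fetch_and_zero(): read *ptr, zero it, return the old value. */
#define take_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __old = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__old;					\
})

struct vma { int id; };

static void unpin(struct vma **slot)
{
	struct vma *vma = take_and_zero(slot);

	if (vma)	/* a second call sees NULL and does nothing */
		printf("unpinning vma %d\n", vma->id);
}

int main(void)
{
	struct vma v = { .id = 42 };
	struct vma *slot = &v;

	unpin(&slot);
	unpin(&slot);	/* harmless no-op */
	return 0;
}
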
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
- * @fb: framebuffer to prepare for presentation
+ * @new_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
* involves pinning the underlying object and updating the frontbuffer tracking
@@ -12682,20 +12769,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
}
- if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
- const int align = intel_cursor_alignment(dev_priv);
-
- ret = i915_gem_object_attach_phys(obj, align);
- } else {
- struct i915_vma *vma;
-
- vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (!IS_ERR(vma))
- to_intel_plane_state(new_state)->vma = vma;
- else
- ret = PTR_ERR(vma);
- }
+ ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -12729,7 +12803,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
/**
* intel_cleanup_plane_fb - Cleans up an fb after plane use
* @plane: drm plane to clean up for
- * @fb: old framebuffer that was on plane
+ * @old_state: the state from the previous modeset
*
* Cleans up a framebuffer that has just been removed from a plane.
*
@@ -12739,15 +12813,12 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct i915_vma *vma;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
/* Should only be called after a successful intel_prepare_plane_fb()! */
- vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
- if (vma) {
- mutex_lock(&plane->dev->struct_mutex);
- intel_unpin_fb_vma(vma);
- mutex_unlock(&plane->dev->struct_mutex);
- }
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_plane_unpin_fb(to_intel_plane_state(old_state));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
int
@@ -13032,7 +13103,6 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
struct drm_crtc_state *crtc_state = crtc->state;
- struct i915_vma *old_vma, *vma;
/*
* When crtc is inactive or there is a modeset pending,
@@ -13091,25 +13161,9 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (ret)
goto out_free;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
- int align = intel_cursor_alignment(dev_priv);
-
- ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- goto out_unlock;
- }
- } else {
- vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
- if (IS_ERR(vma)) {
- DRM_DEBUG_KMS("failed to pin object\n");
-
- ret = PTR_ERR(vma);
- goto out_unlock;
- }
-
- to_intel_plane_state(new_plane_state)->vma = vma;
- }
+ ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
+ if (ret)
+ goto out_unlock;
old_fb = old_plane_state->fb;
@@ -13129,9 +13183,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
- old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma);
- if (old_vma)
- intel_unpin_fb_vma(old_vma);
+ intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -13159,6 +13211,32 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.format_mod_supported = intel_cursor_plane_format_mod_supported,
};
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -13201,6 +13279,21 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->i9xx_plane = (enum i9xx_plane_id) pipe;
primary->id = PLANE_PRIMARY;
primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ primary->has_fbc = skl_plane_has_fbc(dev_priv,
+ primary->pipe,
+ primary->id);
+ else
+ primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
+ primary->i9xx_plane);
+
+ if (primary->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+ }
+
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
@@ -13496,8 +13589,8 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
return to_intel_crtc(connector->base.state->crtc)->pipe;
}
-int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file)
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_crtc *drmmode_crtc;
@@ -13945,7 +14038,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* gen2/3 display engine uses the fence if present,
* so the tiling mode must match the fb modifier exactly.
*/
- if (INTEL_INFO(dev_priv)->gen < 4 &&
+ if (INTEL_GEN(dev_priv) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
goto err;
@@ -14160,7 +14253,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
- if (INTEL_INFO(dev_priv)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
skylake_get_initial_plane_config;
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index c4042e342f50..4e7418b345bc 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -139,6 +139,17 @@ enum dpio_phy {
#define I915_NUM_PHYS_VLV 2
+enum aux_ch {
+ AUX_CH_A,
+ AUX_CH_B,
+ AUX_CH_C,
+ AUX_CH_D,
+ _AUX_CH_E, /* does not exist */
+ AUX_CH_F,
+};
+
+#define aux_ch_name(a) ((a) + 'A')
+
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
@@ -175,6 +186,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
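
Note that the new enum relies on the _AUX_CH_E placeholder to keep the remaining values aligned with their channel letters, which is what lets aux_ch_name() use plain arithmetic. A small self-contained illustration (not part of the patch):

    #include <stdio.h>

    enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C, AUX_CH_D, _AUX_CH_E, AUX_CH_F };
    #define aux_ch_name(a) ((a) + 'A')

    int main(void)
    {
            printf("AUX %c\n", aux_ch_name(AUX_CH_C)); /* AUX C */
            printf("AUX %c\n", aux_ch_name(AUX_CH_F)); /* AUX F */
            return 0;
    }
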
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f10a14330e7c..9a4a51e79fa1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -96,15 +96,6 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
-static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
- 324000, 432000, 540000 };
-static const int skl_rates[] = { 162000, 216000, 270000,
- 324000, 432000, 540000 };
-static const int cnl_rates[] = { 162000, 216000, 270000,
- 324000, 432000, 540000,
- 648000, 810000 };
-static const int default_rates[] = { 162000, 270000, 540000 };
-
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -144,14 +135,17 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
+ static const int dp_rates[] = {
+ 162000, 270000, 540000, 810000
+ };
int i, max_rate;
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
- for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
- if (default_rates[i] > max_rate)
+ for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+ if (dp_rates[i] > max_rate)
break;
- intel_dp->sink_rates[i] = default_rates[i];
+ intel_dp->sink_rates[i] = dp_rates[i];
}
intel_dp->num_sink_rates = i;
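
A quick worked example of the truncation loop above, assuming a sink whose DPCD max link rate decodes to 540000 kHz (HBR2): the new 810000 kHz (HBR3) entry is then never advertised for that sink. Standalone sketch only, not driver code.

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            static const int dp_rates[] = { 162000, 270000, 540000, 810000 };
            int sink_rates[ARRAY_SIZE(dp_rates)];
            int max_rate = 540000; /* assumed sink cap (HBR2) */
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
                    if (dp_rates[i] > max_rate)
                            break;
                    sink_rates[i] = dp_rates[i];
            }
            printf("num_sink_rates = %u, top rate = %d\n", i, sink_rates[i - 1]);
            return 0; /* num_sink_rates = 3, top rate = 540000 */
    }
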
@@ -258,7 +252,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
if (IS_CNL_WITH_PORT_F(dev_priv))
return 810000;
- /* For other SKUs, max rate on ports A and B is 5.4G */
+ /* For other SKUs, max rate on ports A and D is 5.4G */
if (port == PORT_A || port == PORT_D)
return 540000;
@@ -268,6 +262,22 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
+ /* The values must be in increasing order */
+ static const int cnl_rates[] = {
+ 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
+ };
+ static const int bxt_rates[] = {
+ 162000, 216000, 243000, 270000, 324000, 432000, 540000
+ };
+ static const int skl_rates[] = {
+ 162000, 216000, 270000, 324000, 432000, 540000
+ };
+ static const int hsw_rates[] = {
+ 162000, 270000, 540000
+ };
+ static const int g4x_rates[] = {
+ 162000, 270000
+ };
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const struct ddi_vbt_port_info *info =
@@ -278,23 +288,23 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* This should only be done once */
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
- if (IS_GEN9_LP(dev_priv)) {
- source_rates = bxt_rates;
- size = ARRAY_SIZE(bxt_rates);
- } else if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_CANNONLAKE(dev_priv)) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
max_rate = cnl_max_source_rate(intel_dp);
+ } else if (IS_GEN9_LP(dev_priv)) {
+ source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
} else if (IS_GEN9_BC(dev_priv)) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
IS_BROADWELL(dev_priv)) {
- source_rates = default_rates;
- size = ARRAY_SIZE(default_rates);
+ source_rates = hsw_rates;
+ size = ARRAY_SIZE(hsw_rates);
} else {
- source_rates = default_rates;
- size = ARRAY_SIZE(default_rates) - 1;
+ source_rates = g4x_rates;
+ size = ARRAY_SIZE(g4x_rates);
}
if (max_rate && vbt_max_rate)
@@ -356,7 +366,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
/* Paranoia, there should always be something in common. */
if (WARN_ON(intel_dp->num_common_rates == 0)) {
- intel_dp->common_rates[0] = default_rates[0];
+ intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
}
@@ -656,19 +666,15 @@ static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ int backlight_controller = dev_priv->vbt.backlight.controller;
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
WARN_ON(!intel_dp_is_edp(intel_dp));
- /*
- * TODO: BXT has 2 PPS instances. The correct port->PPS instance
- * mapping needs to be retrieved from VBT, for now just hard-code to
- * use instance #0 always.
- */
if (!intel_dp->pps_reset)
- return 0;
+ return backlight_controller;
intel_dp->pps_reset = false;
@@ -678,7 +684,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
*/
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
- return 0;
+ return backlight_controller;
}
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
@@ -936,7 +942,7 @@ static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
uint32_t status;
bool done;
@@ -956,8 +962,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (index)
return 0;
@@ -971,8 +976,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (index)
return 0;
@@ -982,7 +986,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dig_port->base.port == PORT_A)
+ if (intel_dp->aux_ch == AUX_CH_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
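
To make the "divide by 2000" comment above concrete, assuming a 125 MHz PCH raw clock (the driver keeps rawclk_freq in kHz), the divider works out as below, which also lines up with the hardcoded non-ULT HSW value of 63 a few lines further down. Standalone arithmetic check only.

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            int rawclk_khz = 125000; /* assumed 125 MHz PCH raw clock */
            int div = DIV_ROUND_CLOSEST(rawclk_khz, 2000);

            printf("divider %d -> AUX clock %d kHz\n", div, rawclk_khz / div);
            return 0; /* divider 63 -> 1984 kHz, i.e. roughly 2 MHz */
    }
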
@@ -990,10 +994,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- if (intel_dig_port->base.port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -1062,34 +1065,16 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
-static uint32_t intel_dp_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
- int send_bytes,
- uint32_t aux_clock_divider,
- bool aksv_write)
-{
- uint32_t val = 0;
-
- if (aksv_write) {
- send_bytes += 5;
- val |= DP_AUX_CH_CTL_AUX_AKSV_SELECT;
- }
-
- return val | intel_dp->get_aux_send_ctl(intel_dp,
- has_aux_irq,
- send_bytes,
- aux_clock_divider);
-}
-
static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
- const uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size, bool aksv_write)
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size,
+ u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ i915_reg_t ch_ctl, ch_data[5];
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
@@ -1097,6 +1082,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
pps_lock(intel_dp);
/*
@@ -1144,17 +1133,18 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
}
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
- u32 send_ctl = intel_dp_get_aux_send_ctl(intel_dp,
- has_aux_irq,
- send_bytes,
- aux_clock_divider,
- aksv_write);
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ has_aux_irq,
+ send_bytes,
+ aux_clock_divider);
+
+ send_ctl |= aux_send_ctl_flags;
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
+ I915_WRITE(ch_data[i >> 2],
intel_dp_pack_aux(send + i,
send_bytes - i));
@@ -1170,14 +1160,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
- continue;
-
/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
* 400us delay required for errors and timeouts
* Timeout errors from the HW already meet this
* requirement so skip to next iteration
*/
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
usleep_range(400, 500);
continue;
@@ -1223,14 +1213,6 @@ done:
if (recv_bytes == 0 || recv_bytes > 20) {
DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
recv_bytes);
- /*
- * FIXME: This patch was created on top of a series that
- * organize the retries at drm level. There EBUSY should
- * also take care for 1ms wait before retrying.
- * That aux retries re-org is still needed and after that is
- * merged we remove this sleep from here.
- */
- usleep_range(1000, 1500);
ret = -EBUSY;
goto out;
}
@@ -1239,7 +1221,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
+ intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
@@ -1256,6 +1238,17 @@ out:
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
+
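
For reference, the header packing that intel_dp_aux_header() performs yields 90 02 02 00 for a one-byte native AUX read of DPCD address 0x202 (DP_LANE0_1_STATUS). A standalone sketch of that packing, with DP_AUX_NATIVE_READ redefined locally for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define DP_AUX_NATIVE_READ 0x9 /* request nibble from the DP AUX framing */

    int main(void)
    {
            uint32_t address = 0x202; /* DP_LANE0_1_STATUS */
            uint8_t request = DP_AUX_NATIVE_READ, size = 1, txbuf[4];

            txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
            txbuf[1] = (address >> 8) & 0xff;
            txbuf[2] = address & 0xff;
            txbuf[3] = size - 1;

            printf("%02x %02x %02x %02x\n",
                   txbuf[0], txbuf[1], txbuf[2], txbuf[3]); /* 90 02 02 00 */
            return 0;
    }
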
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
@@ -1264,11 +1257,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
size_t txsize, rxsize;
int ret;
- txbuf[0] = (msg->request << 4) |
- ((msg->address >> 16) & 0xf);
- txbuf[1] = (msg->address >> 8) & 0xff;
- txbuf[2] = msg->address & 0xff;
- txbuf[3] = msg->size - 1;
+ intel_dp_aux_header(txbuf, msg);
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
@@ -1285,8 +1274,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
- ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize,
- false);
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, 0);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
@@ -1308,8 +1297,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(rxsize > 20))
return -E2BIG;
- ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize,
- false);
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, 0);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
/*
@@ -1331,171 +1320,173 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
-static enum port intel_aux_port(struct drm_i915_private *dev_priv,
- enum port port)
+static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
- enum port aux_port;
+ enum aux_ch aux_ch;
if (!info->alternate_aux_channel) {
+ aux_ch = (enum aux_ch) port;
+
DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- port_name(port), port_name(port));
- return port;
+ aux_ch_name(aux_ch), port_name(port));
+ return aux_ch;
}
switch (info->alternate_aux_channel) {
case DP_AUX_A:
- aux_port = PORT_A;
+ aux_ch = AUX_CH_A;
break;
case DP_AUX_B:
- aux_port = PORT_B;
+ aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
- aux_port = PORT_C;
+ aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- aux_port = PORT_D;
+ aux_ch = AUX_CH_D;
break;
case DP_AUX_F:
- aux_port = PORT_F;
+ aux_ch = AUX_CH_F;
break;
default:
MISSING_CASE(info->alternate_aux_channel);
- aux_port = PORT_A;
+ aux_ch = AUX_CH_A;
break;
}
DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- port_name(aux_port), port_name(port));
+ aux_ch_name(aux_ch), port_name(port));
- return aux_port;
+ return aux_ch;
}
-static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+static enum intel_display_power_domain
+intel_aux_power_domain(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return DP_AUX_CH_CTL(port);
+ switch (intel_dp->aux_ch) {
+ case AUX_CH_A:
+ return POWER_DOMAIN_AUX_A;
+ case AUX_CH_B:
+ return POWER_DOMAIN_AUX_B;
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_C;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_D;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_F;
default:
- MISSING_CASE(port);
- return DP_AUX_CH_CTL(PORT_B);
+ MISSING_CASE(intel_dp->aux_ch);
+ return POWER_DOMAIN_AUX_A;
}
}
-static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return DP_AUX_CH_DATA(port, index);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_DATA(PORT_B, index);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
}
}
-static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- switch (port) {
- case PORT_A:
- return DP_AUX_CH_CTL(port);
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return PCH_DP_AUX_CH_CTL(port);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_CTL(PORT_A);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
}
}
-static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_A:
- return DP_AUX_CH_DATA(port, index);
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return PCH_DP_AUX_CH_DATA(port, index);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_DATA(PORT_A, index);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
}
}
-static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- switch (port) {
- case PORT_A:
- case PORT_B:
- case PORT_C:
- case PORT_D:
- case PORT_F:
- return DP_AUX_CH_CTL(port);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_CTL(PORT_A);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
}
}
-static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_A:
- case PORT_B:
- case PORT_C:
- case PORT_D:
- case PORT_F:
- return DP_AUX_CH_DATA(port, index);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_DATA(PORT_A, index);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
}
}
-static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
-{
- if (INTEL_INFO(dev_priv)->gen >= 9)
- return skl_aux_ctl_reg(dev_priv, port);
- else if (HAS_PCH_SPLIT(dev_priv))
- return ilk_aux_ctl_reg(dev_priv, port);
- else
- return g4x_aux_ctl_reg(dev_priv, port);
-}
-
-static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
-{
- if (INTEL_INFO(dev_priv)->gen >= 9)
- return skl_aux_data_reg(dev_priv, port, index);
- else if (HAS_PCH_SPLIT(dev_priv))
- return ilk_aux_data_reg(dev_priv, port, index);
- else
- return g4x_aux_data_reg(dev_priv, port, index);
-}
-
-static void intel_aux_reg_init(struct intel_dp *intel_dp)
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = intel_aux_port(dev_priv,
- dp_to_dig_port(intel_dp)->base.port);
- int i;
-
- intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
- for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
- intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
}
static void
@@ -1507,14 +1498,42 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->aux_ch = intel_aux_ch(intel_dp);
+ intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_aux_reg_init(intel_dp);
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+ port_name(encoder->port));
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
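
The net effect of this hunk is a move from AUX registers cached at init time (the removed intel_aux_reg_init()) to per-platform function pointers looked up on every access, keyed off intel_dp->aux_ch. A simplified standalone sketch of that dispatch pattern; the types, names and register offsets below are made up for illustration:

    #include <stdio.h>

    enum aux_ch { AUX_CH_A, AUX_CH_B };

    struct fake_dp {
            enum aux_ch aux_ch;
            unsigned int (*aux_ch_ctl_reg)(struct fake_dp *dp);
    };

    static unsigned int fake_aux_ctl_reg(struct fake_dp *dp)
    {
            return 0x64010 + 0x100 * dp->aux_ch; /* made-up offsets */
    }

    int main(void)
    {
            struct fake_dp dp = { AUX_CH_B, fake_aux_ctl_reg };

            printf("AUX ctl register 0x%x\n", dp.aux_ch_ctl_reg(&dp));
            return 0;
    }
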
@@ -1894,6 +1913,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count,
bool link_mst)
{
+ intel_dp->link_trained = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
intel_dp->link_mst = link_mst;
@@ -2742,6 +2762,8 @@ static void intel_disable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp->link_trained = false;
+
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
@@ -3172,35 +3194,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
- return false;
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
-static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
-{
- uint8_t dprx = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dprx) != 1)
- return false;
- return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
-}
-
-static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
-{
- uint8_t alpm_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
- &alpm_caps) != 1)
- return false;
- return alpm_caps & DP_ALPM_CAP;
-}
-
/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -3751,40 +3744,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- /* Check if the panel supports PSR */
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
- }
-
- if (INTEL_GEN(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
- uint8_t frame_sync_cap;
-
- dev_priv->psr.sink_support = true;
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap) != 1)
- frame_sync_cap = 0;
- dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
- /* PSR2 needs frame sync as well */
- dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
- DRM_DEBUG_KMS("PSR2 %s on sink",
- dev_priv->psr.psr2_support ? "supported" : "not supported");
-
- if (dev_priv->psr.psr2_support) {
- dev_priv->psr.y_cord_support =
- intel_dp_get_y_cord_status(intel_dp);
- dev_priv->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- }
-
- }
+ intel_psr_init_dpcd(intel_dp);
/*
* Read the eDP display control registers.
@@ -4315,12 +4275,85 @@ go_again:
return -EINVAL;
}
-static void
-intel_dp_retrain_link(struct intel_dp *intel_dp)
+static bool
+intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_dp->link_trained)
+ return false;
+
+ if (!intel_dp_get_link_status(intel_dp, link_status))
+ return false;
+
+ /*
+ * Validate the cached values of intel_dp->link_rate and
+ * intel_dp->lane_count before attempting to retrain.
+ */
+ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
+ intel_dp->lane_count))
+ return false;
+
+ /* Retrain if Channel EQ or CR not ok */
+ return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+}
+
+/*
+ * If the display is now connected, check the link status; there have
+ * been known issues of link loss triggering a long pulse.
+ *
+ * Some sinks (e.g. ASUS PB287Q) seem to perform some weird HPD ping
+ * pong during modesets. So we can apparently end up with HPD going
+ * low during a modeset, and then going back up soon after. Once that
+ * happens we must retrain the link to get a picture, in case no
+ * userspace component reacted to the intermittent HPD dip.
+ */
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ /* FIXME handle the MST connectors as well */
+
+ if (!connector || connector->base.status != connector_status_connected)
+ return 0;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->base.active)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ return 0;
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
@@ -4338,51 +4371,49 @@ intel_dp_retrain_link(struct intel_dp *intel_dp)
if (crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
+
+ return 0;
}
-static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+/*
+ * If the display is now connected, check the link status; there have
+ * been known issues of link loss triggering a long pulse.
+ *
+ * Some sinks (e.g. ASUS PB287Q) seem to perform some weird HPD ping
+ * pong during modesets. So we can apparently end up with HPD going
+ * low during a modeset, and then going back up soon after. Once that
+ * happens we must retrain the link to get a picture, in case no
+ * userspace component reacted to the intermittent HPD dip.
+ */
+static bool intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_connector_state *conn_state =
- intel_dp->attached_connector->base.state;
- u8 link_status[DP_LINK_STATUS_SIZE];
-
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
-
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("Failed to get link status\n");
- return;
- }
+ struct drm_modeset_acquire_ctx ctx;
+ bool changed;
+ int ret;
- if (!conn_state->crtc)
- return;
+ changed = intel_encoder_hotplug(encoder, connector);
- WARN_ON(!drm_modeset_is_locked(&conn_state->crtc->mutex));
+ drm_modeset_acquire_init(&ctx, 0);
- if (!conn_state->crtc->state->active)
- return;
+ for (;;) {
+ ret = intel_dp_retrain_link(encoder, &ctx);
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
- return;
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
- /*
- * Validate the cached values of intel_dp->link_rate and
- * intel_dp->lane_count before attempting to retrain.
- */
- if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
- intel_dp->lane_count))
- return;
+ break;
+ }
- /* Retrain if Channel EQ or CR not ok */
- if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- intel_encoder->base.name);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- intel_dp_retrain_link(intel_dp);
- }
+ return changed;
}
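
intel_dp_hotplug() above uses the standard acquire-context retry idiom: any -EDEADLK from the locked section backs off and retries until the retrain attempt completes or fails for another reason. A userspace mock of that control flow (all names here are stand-ins, not DRM API calls):

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Stand-in for the locked work; pretend attempt #1 hits lock contention. */
    static int do_locked_work(void)
    {
            return ++attempts == 1 ? -EDEADLK : 0;
    }

    int main(void)
    {
            int ret;

            for (;;) {
                    ret = do_locked_work();
                    if (ret == -EDEADLK) {
                            /* drm_modeset_backoff() would drop and re-wait here */
                            continue;
                    }
                    break;
            }
            printf("done after %d attempts, ret = %d\n", attempts, ret);
            return 0;
    }
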
/*
@@ -4440,7 +4471,9 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- intel_dp_check_link_status(intel_dp);
+ /* defer to the hotplug work for link retraining if needed */
+ if (intel_dp_needs_link_retrain(intel_dp))
+ return false;
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
@@ -4825,20 +4858,6 @@ intel_dp_long_pulse(struct intel_connector *connector)
*/
status = connector_status_disconnected;
goto out;
- } else {
- /*
- * If display is now connected check links status,
- * there has been known issues of link loss triggerring
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
- intel_dp_check_link_status(intel_dp);
}
/*
@@ -5054,7 +5073,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
- uint8_t txbuf[4], rxbuf[2], reply = 0;
+ static const struct drm_dp_aux_msg msg = {
+ .request = DP_AUX_NATIVE_WRITE,
+ .address = DP_AUX_HDCP_AKSV,
+ .size = DRM_HDCP_KSV_LEN,
+ };
+ uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
ssize_t dpcd_ret;
int ret;
@@ -5072,14 +5096,11 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
* we were writing the data, and then tickle the hardware to output the
* data once the header is sent out.
*/
- txbuf[0] = (DP_AUX_NATIVE_WRITE << 4) |
- ((DP_AUX_HDCP_AKSV >> 16) & 0xf);
- txbuf[1] = (DP_AUX_HDCP_AKSV >> 8) & 0xff;
- txbuf[2] = DP_AUX_HDCP_AKSV & 0xff;
- txbuf[3] = DRM_HDCP_KSV_LEN - 1;
-
- ret = intel_dp_aux_ch(intel_dp, txbuf, sizeof(txbuf), rxbuf,
- sizeof(rxbuf), true);
+ intel_dp_aux_header(txbuf, &msg);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
+ rxbuf, sizeof(rxbuf),
+ DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
@@ -5413,37 +5434,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (!intel_dp->is_mst) {
- struct drm_modeset_acquire_ctx ctx;
- struct drm_connector *connector = &intel_dp->attached_connector->base;
- struct drm_crtc *crtc;
- int iret;
- bool handled = false;
-
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- iret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, &ctx);
- if (iret)
- goto err;
-
- crtc = connector->state->crtc;
- if (crtc) {
- iret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (iret)
- goto err;
- }
+ bool handled;
handled = intel_dp_short_pulse(intel_dp);
-err:
- if (iret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- WARN(iret, "Acquiring modeset locks failed with %i\n", iret);
-
/* Short pulse can signify loss of hdcp authentication */
intel_hdcp_check_link(intel_dp->attached_connector);
@@ -6266,42 +6260,6 @@ out_vdd_off:
return false;
}
-/* Set up the hotplug pin and aux power domain. */
-static void
-intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
-{
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
-
- encoder->hpd_pin = intel_hpd_pin_default(dev_priv, encoder->port);
-
- switch (encoder->port) {
- case PORT_A:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
- break;
- case PORT_B:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
- break;
- case PORT_C:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
- break;
- case PORT_D:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
- break;
- case PORT_E:
- /* FIXME: Check VBT for actual wiring of PORT E */
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
- break;
- case PORT_F:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_F;
- break;
- default:
- MISSING_CASE(encoder->port);
- }
-}
-
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
struct intel_connector *intel_connector;
@@ -6353,20 +6311,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->active_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
- intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
- else
- intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
- else
- intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -6407,7 +6351,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp_init_connector_port_info(intel_dig_port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
intel_dp_aux_init(intel_dp);
@@ -6484,6 +6428,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
"DP %c", port_name(port)))
goto err_encoder_init;
+ intel_encoder->hotplug = intel_dp_hotplug;
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index cf8fef8b6f58..f59b59bb0a21 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -248,6 +248,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
int tries;
u32 training_pattern;
uint8_t link_status[DP_LINK_STATUS_SIZE];
+ bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
@@ -259,7 +260,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
return false;
}
- intel_dp->channel_eq_status = false;
for (tries = 0; tries < 5; tries++) {
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
@@ -279,7 +279,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
- intel_dp->channel_eq_status = true;
+ channel_eq = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training "
"successful\n");
break;
@@ -301,12 +301,14 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
intel_dp_set_idle_link_train(intel_dp);
- return intel_dp->channel_eq_status;
+ return channel_eq;
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
+ intel_dp->link_trained = true;
+
intel_dp_set_link_train(intel_dp,
DP_TRAINING_PATTERN_DISABLE);
}
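
Taken together with the intel_dp.c changes, link_trained is set only here in intel_dp_stop_link_train(), cleared in intel_dp_set_link_params() and intel_disable_dp(), and gates intel_dp_needs_link_retrain(), so a retrain is never attempted on a link that was never (or is no longer) trained. A minimal standalone sketch of that bookkeeping, with simplified stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_dp { bool link_trained; };

    static void set_link_params(struct fake_dp *dp) { dp->link_trained = false; }
    static void stop_link_train(struct fake_dp *dp) { dp->link_trained = true; }

    static bool needs_link_retrain(const struct fake_dp *dp, bool channel_eq_ok)
    {
            return dp->link_trained && !channel_eq_ok;
    }

    int main(void)
    {
            struct fake_dp dp = { false };

            set_link_params(&dp);
            printf("%d\n", needs_link_retrain(&dp, false)); /* 0: never trained */
            stop_link_train(&dp);
            printf("%d\n", needs_link_retrain(&dp, false)); /* 1: trained, EQ bad */
            return 0;
    }
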
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 76473e9836c6..c8e9e44e5981 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -147,7 +147,7 @@ struct bxt_ddi_phy_info {
*/
struct {
/**
- * @port: which port maps to this channel.
+ * @channel.port: which port maps to this channel.
*/
enum port port;
} channel[2];
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2bc4e17baf4f..d4368589b355 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -204,6 +204,7 @@ struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
struct i915_vma *vma;
+ unsigned long vma_flags;
async_cookie_t cookie;
int preferred_bpp;
};
@@ -214,7 +215,8 @@ struct intel_encoder {
enum intel_output_type type;
enum port port;
unsigned int cloneable;
- void (*hot_plug)(struct intel_encoder *);
+ bool (*hotplug)(struct intel_encoder *encoder,
+ struct intel_connector *connector);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@@ -490,6 +492,8 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
struct i915_vma *vma;
+ unsigned long flags;
+#define PLANE_HAS_FENCE BIT(0)
struct {
u32 offset;
@@ -932,6 +936,7 @@ struct intel_plane {
enum plane_id id;
enum pipe pipe;
bool can_scale;
+ bool has_fbc;
int max_downscale;
uint32_t frontbuffer_bit;
@@ -1038,17 +1043,16 @@ struct intel_dp_compliance {
struct intel_dp {
i915_reg_t output_reg;
- i915_reg_t aux_ch_ctl_reg;
- i915_reg_t aux_ch_data_reg[5];
uint32_t DP;
int link_rate;
uint8_t lane_count;
uint8_t sink_count;
bool link_mst;
+ bool link_trained;
bool has_audio;
bool detect_done;
- bool channel_eq_status;
bool reset_link_params;
+ enum aux_ch aux_ch;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1123,6 +1127,9 @@ struct intel_dp {
int send_bytes,
uint32_t aux_clock_divider);
+ i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
+ i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
+
/* This is called before a link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
@@ -1407,6 +1414,8 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv);
void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void icl_init_cdclk(struct drm_i915_private *dev_priv);
+void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
@@ -1455,8 +1464,8 @@ struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
-int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
static inline bool
@@ -1501,8 +1510,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_vma(struct i915_vma *vma);
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation,
+ bool uses_fence,
+ unsigned long *out_flags);
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
@@ -1615,6 +1627,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
@@ -1694,7 +1708,8 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector);
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -1856,6 +1871,7 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
@@ -1982,8 +1998,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
- struct intel_rps_client *rps);
+void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -2020,8 +2035,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
-int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void skl_update_plane(struct intel_plane *plane,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 59c066ca14e5..eb0c559b2715 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -245,7 +245,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
intel_dvo->attached_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- /* If we have timings from the BIOS for the panel, put them in
+ /*
+ * If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
@@ -293,11 +294,6 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
I915_WRITE(dvo_reg, dvo_val);
}
-/**
- * Detect the output connection on our DVO device.
- *
- * Unimplemented.
- */
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
@@ -313,7 +309,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
- /* We should probably have an i2c driver get_modes function for those
+ /*
+ * We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
@@ -371,7 +368,7 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-/**
+/*
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
* Other chips with DVO LVDS will need to extend this to deal with the LVDS
@@ -443,7 +440,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
uint32_t dpll[I915_MAX_PIPES];
enum port port;
- /* Allow the I2C driver info to specify the GPIO to be used in
+ /*
+ * Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
@@ -454,7 +452,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
else
gpio = GMBUS_PIN_DPB;
- /* Set up the I2C bus necessary for the chip we're probing.
+ /*
+ * Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
@@ -462,12 +461,14 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_dvo->dev = *dvo;
- /* GMBUS NAK handling seems to be unstable, hence let the
+ /*
+ * GMBUS NAK handling seems to be unstable, hence let the
* transmitter detection run in bit banging mode for now.
*/
intel_gmbus_force_bit(i2c, true);
- /* ns2501 requires the DVO 2x clock before it will
+ /*
+ * ns2501 requires the DVO 2x clock before it will
+ * respond to i2c accesses, so make sure we have
+ * the clock enabled before we attempt to
* initialize the device.
@@ -525,7 +526,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
- /* For our LVDS chipsets, we should hopefully be able
+ /*
+ * For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
* However, it's in a different format from the BIOS
* data on chipsets with integrated LVDS (stored in AIM
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 7eebfbb95e89..4ba139c27fba 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -123,6 +123,22 @@ static const struct engine_info intel_engines[] = {
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
},
+ [VCS3] = {
+ .hw_id = VCS3_HW,
+ .uabi_id = I915_EXEC_BSD,
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 2,
+ .mmio_base = GEN11_BSD3_RING_BASE,
+ .irq_shift = 0, /* not used */
+ },
+ [VCS4] = {
+ .hw_id = VCS4_HW,
+ .uabi_id = I915_EXEC_BSD,
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 3,
+ .mmio_base = GEN11_BSD4_RING_BASE,
+ .irq_shift = 0, /* not used */
+ },
[VECS] = {
.hw_id = VECS_HW,
.uabi_id = I915_EXEC_VEBOX,
@@ -131,6 +147,14 @@ static const struct engine_info intel_engines[] = {
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
},
+ [VECS2] = {
+ .hw_id = VECS2_HW,
+ .uabi_id = I915_EXEC_VEBOX,
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 1,
+ .mmio_base = GEN11_VEBOX2_RING_BASE,
+ .irq_shift = 0, /* not used */
+ },
};
/**
@@ -210,6 +234,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
class_info = &intel_engine_classes[info->class];
+ BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
+ BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+
if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
@@ -230,7 +257,25 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
class_info->name, info->instance) >=
sizeof(engine->name));
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = info->mmio_base;
+ if (INTEL_GEN(dev_priv) >= 11) {
+ switch (engine->id) {
+ case VCS:
+ engine->mmio_base = GEN11_BSD_RING_BASE;
+ break;
+ case VCS2:
+ engine->mmio_base = GEN11_BSD2_RING_BASE;
+ break;
+ case VECS:
+ engine->mmio_base = GEN11_VEBOX_RING_BASE;
+ break;
+ default:
+ /* take the original value for all other engines */
+ engine->mmio_base = info->mmio_base;
+ break;
+ }
+ } else {
+ engine->mmio_base = info->mmio_base;
+ }
engine->irq_shift = info->irq_shift;
engine->class = info->class;
engine->instance = info->instance;
@@ -423,6 +468,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+ execlists->queue_priority = INT_MIN;
execlists->queue = RB_ROOT;
execlists->first = NULL;
}
@@ -631,7 +677,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+ if (engine->i915->preempt_context) {
ring = engine->context_pin(engine,
engine->i915->preempt_context);
if (IS_ERR(ring)) {
@@ -656,7 +702,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ if (engine->i915->preempt_context)
engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
@@ -686,12 +732,12 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ if (engine->i915->preempt_context)
engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
@@ -707,7 +753,7 @@ u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
return acthd;
}
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
@@ -1426,20 +1472,20 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
return 0;
}
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+int intel_ring_workarounds_emit(struct i915_request *rq)
{
- struct i915_workarounds *w = &req->i915->workarounds;
+ struct i915_workarounds *w = &rq->i915->workarounds;
u32 *cs;
int ret, i;
if (w->count == 0)
return 0;
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(req, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, w->count * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1450,9 +1496,9 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
@@ -1464,7 +1510,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
bool idle = true;
- intel_runtime_pm_get(dev_priv);
+ /* If the whole device is asleep, the engine must be idle */
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return true;
/* First check that no commands are left in the ring */
if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1503,10 +1551,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
return true;
- /* Interrupt/tasklet pending? */
- if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
- return false;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active))
return false;
@@ -1554,7 +1598,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
const struct i915_gem_context * const kernel_context =
engine->i915->kernel_context;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -1666,13 +1710,13 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
}
static void print_request(struct drm_printer *m,
- struct drm_i915_gem_request *rq,
+ struct i915_request *rq,
const char *prefix)
{
- drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno,
- i915_gem_request_completed(rq) ? "!" : "",
- rq->ctx->hw_id, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" : "",
+ rq->fence.context, rq->fence.seqno,
rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq->timeline->common->name);
@@ -1707,73 +1751,20 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-void intel_engine_dump(struct intel_engine_cs *engine,
- struct drm_printer *m,
- const char *header, ...)
+static void intel_engine_print_registers(const struct intel_engine_cs *engine,
+ struct drm_printer *m)
{
- struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_request *rq;
- struct rb_node *rb;
- char hdr[80];
+ const struct intel_engine_execlists * const execlists =
+ &engine->execlists;
u64 addr;
- if (header) {
- va_list ap;
-
- va_start(ap, header);
- drm_vprintf(m, header, &ap);
- va_end(ap);
- }
-
- if (i915_terminally_wedged(&engine->i915->gpu_error))
- drm_printf(m, "*** WEDGED ***\n");
-
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
- engine->timeline->inflight_seqnos);
- drm_printf(m, "\tReset count: %d (global %d)\n",
- i915_reset_engine_count(error, engine),
- i915_reset_count(error));
-
- rcu_read_lock();
-
- drm_printf(m, "\tRequests:\n");
-
- rq = list_first_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tfirst ");
-
- rq = list_last_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tlast ");
-
- rq = i915_gem_find_active_request(engine);
- if (rq) {
- print_request(m, rq, "\t\tactive ");
- drm_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
- }
-
- drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
- I915_READ(RING_START(engine->mmio_base)),
- rq ? i915_ggtt_offset(rq->ring->vma) : 0);
- drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
- rq ? rq->ring->head : 0);
- drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
- rq ? rq->ring->tail : 0);
+ drm_printf(m, "\tRING_START: 0x%08x\n",
+ I915_READ(RING_START(engine->mmio_base)));
+ drm_printf(m, "\tRING_HEAD: 0x%08x\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ drm_printf(m, "\tRING_TAIL: 0x%08x\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
I915_READ(RING_CTL(engine->mmio_base)),
I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
@@ -1782,6 +1773,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
I915_READ(RING_MI_MODE(engine->mmio_base)),
I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
}
+
+ if (INTEL_GEN(dev_priv) >= 6) {
+ drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ }
+
if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
drm_printf(m, "\tSYNC_0: 0x%08x\n",
I915_READ(RING_SYNC_0(engine->mmio_base)));
@@ -1792,8 +1788,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
I915_READ(RING_SYNC_2(engine->mmio_base)));
}
- rcu_read_unlock();
-
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
@@ -1855,10 +1849,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+ struct i915_request *rq;
unsigned int count;
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
+ char hdr[80];
+
snprintf(hdr, sizeof(hdr),
"\t\tELSP[%d] count=%d, rq: ",
idx, count);
@@ -1877,10 +1874,82 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
I915_READ(RING_PP_DIR_DCLV(engine)));
}
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...)
+{
+ struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct i915_request *rq;
+ struct rb_node *rb;
+
+ if (header) {
+ va_list ap;
+
+ va_start(ap, header);
+ drm_vprintf(m, header, &ap);
+ va_end(ap);
+ }
+
+ if (i915_terminally_wedged(&engine->i915->gpu_error))
+ drm_printf(m, "*** WEDGED ***\n");
+
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ engine->timeline->inflight_seqnos);
+ drm_printf(m, "\tReset count: %d (global %d)\n",
+ i915_reset_engine_count(error, engine),
+ i915_reset_count(error));
+
+ rcu_read_lock();
+
+ drm_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct i915_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct i915_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ drm_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ drm_printf(m, "\t\tring->start: 0x%08x\n",
+ i915_ggtt_offset(rq->ring->vma));
+ drm_printf(m, "\t\tring->head: 0x%08x\n",
+ rq->ring->head);
+ drm_printf(m, "\t\tring->tail: 0x%08x\n",
+ rq->ring->tail);
+ }
+
+ rcu_read_unlock();
+
+ if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+ intel_engine_print_registers(engine, m);
+ intel_runtime_pm_put(engine->i915);
+ } else {
+ drm_printf(m, "\tDevice is asleep; skipping register dump\n");
+ }
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link)
print_request(m, rq, "\t\tE ");
+ drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
@@ -1899,10 +1968,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
}
spin_unlock_irq(&b->rb_lock);
- if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
- }
-
drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
engine->irq_posted,
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
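For context, a minimal usage sketch of the reworked dump entry point, assuming a debugfs-style caller (drm_info_printer() and the engine loop below are illustrative and not part of this change); since the MMIO decoding now sits behind intel_runtime_pm_get_if_in_use(), the caller no longer has to hold a wakeref itself:

	/* Illustrative caller: dump every engine through a drm_printer. */
	struct drm_printer p = drm_info_printer(dev_priv->drm.dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);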
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d7d1ac79c38a..707d49c12638 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -46,16 +46,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
return HAS_FBC(dev_priv);
}
-static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
-{
- return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
-}
-
-static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
-{
- return INTEL_GEN(dev_priv) < 4;
-}
-
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) <= 3;
@@ -183,7 +173,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->vma->fence) {
+ if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
@@ -241,7 +231,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->vma->fence) {
+ if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= params->vma->fence->id;
@@ -324,7 +314,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->vma->fence) {
+ if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
@@ -753,6 +743,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct drm_framebuffer *fb = plane_state->base.fb;
cache->vma = NULL;
+ cache->flags = 0;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -778,6 +769,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.stride = fb->pitches[0];
cache->vma = plane_state->vma;
+ cache->flags = plane_state->flags;
+ if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
+ cache->flags &= ~PLANE_HAS_FENCE;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -815,8 +809,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* Note that it is possible for a tiled surface to be unmappable (and
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
+ *
+ * FIXME with 90/270 degree rotation we should use the fence on
+ * the normal GTT view (the rotated view doesn't even have a
+ * fence). Would need changes to the FBC fence Y offset as well.
+	 * For now this will effectively disable FBC with 90/270 degree
+ * rotation.
*/
- if (!cache->vma->fence) {
+ if (!(cache->flags & PLANE_HAS_FENCE)) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -859,6 +859,17 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ /*
+ * Work around a problem on GEN9+ HW, where enabling FBC on a plane
+ * having a Y offset that isn't divisible by 4 causes FIFO underrun
+ * and screen flicker.
+ */
+ if (IS_GEN(dev_priv, 9, 10) &&
+ (fbc->state_cache.plane.adjusted_y & 3)) {
+ fbc->no_fbc_reason = "plane Y offset is misaligned";
+ return false;
+ }
+
return true;
}
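A quick worked check of the divisible-by-4 test above, with illustrative constants only:

	/* 100 & 3 == 0 -> aligned, FBC may activate; 101 & 3 == 1 -> refused. */
	BUILD_BUG_ON((100 & 3) != 0);
	BUILD_BUG_ON((101 & 3) != 1);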
@@ -897,6 +908,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
memset(params, 0, sizeof(*params));
params->vma = cache->vma;
+ params->flags = cache->flags;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
@@ -948,6 +960,30 @@ unlock:
mutex_unlock(&fbc->lock);
}
+/**
+ * __intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This is the low level function that actually disables FBC. Callers should
+ * grab the FBC lock.
+ */
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_crtc *crtc = fbc->crtc;
+
+ WARN_ON(!mutex_is_locked(&fbc->lock));
+ WARN_ON(!fbc->enabled);
+ WARN_ON(fbc->active);
+
+ DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+ __intel_fbc_cleanup_cfb(dev_priv);
+
+ fbc->enabled = false;
+ fbc->crtc = NULL;
+}
+
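As the kernel-doc above notes, the helper assumes fbc->lock is already held; a minimal caller sketch, mirroring intel_fbc_disable() further down in this patch:

	/* Sketch only: take fbc->lock around the low-level disable. */
	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);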
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -959,6 +995,13 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
if (!fbc->enabled || fbc->crtc != crtc)
return;
+ if (!i915_modparams.enable_fbc) {
+ intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
+ __intel_fbc_disable(dev_priv);
+
+ return;
+ }
+
if (!intel_fbc_can_activate(crtc)) {
WARN_ON(fbc->active);
return;
@@ -1089,13 +1132,10 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
- if (!plane_state->base.visible)
- continue;
-
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ if (!plane->has_fbc)
continue;
- if (fbc_on_plane_a_only(dev_priv) && plane->i9xx_plane != PLANE_A)
+ if (!plane_state->base.visible)
continue;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -1166,31 +1206,6 @@ out:
}
/**
- * __intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This is the low level function that actually disables FBC. Callers should
- * grab the FBC lock.
- */
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_crtc *crtc = fbc->crtc;
-
- WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->enabled);
- WARN_ON(fbc->active);
- WARN_ON(crtc->active);
-
- DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
-
- __intel_fbc_cleanup_cfb(dev_priv);
-
- fbc->enabled = false;
- fbc->crtc = NULL;
-}
-
-/**
* intel_fbc_disable - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
@@ -1204,6 +1219,8 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (!fbc_supported(dev_priv))
return;
+ WARN_ON(crtc->active);
+
mutex_lock(&fbc->lock);
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
@@ -1226,8 +1243,10 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled)
+ if (fbc->enabled) {
+ WARN_ON(fbc->crtc->active);
__intel_fbc_disable(dev_priv);
+ }
mutex_unlock(&fbc->lock);
cancel_work_sync(&fbc->work.work);
@@ -1352,7 +1371,6 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -1373,14 +1391,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
return;
}
- for_each_pipe(dev_priv, pipe) {
- fbc->possible_framebuffer_bits |=
- INTEL_FRONTBUFFER(pipe, PLANE_PRIMARY);
-
- if (fbc_on_pipe_a_only(dev_priv))
- break;
- }
-
/* This value was pulled out of someone's hat */
if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index da48af11eb6b..6f12adc06365 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -48,7 +48,8 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
struct drm_i915_gem_object *obj = ifbdev->fb->obj;
- unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
+ unsigned int origin =
+ ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
intel_fb_obj_invalidate(obj, origin);
}
@@ -177,6 +178,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
+ unsigned long flags = 0;
bool prealloc = false;
void __iomem *vaddr;
int ret;
@@ -211,7 +213,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_MODE_ROTATE_0);
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
+ DRM_MODE_ROTATE_0,
+ false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;
@@ -268,6 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
+ ifbdev->vma_flags = flags;
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -275,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
return 0;
out_unpin:
- intel_unpin_fb_vma(vma);
+ intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -513,7 +518,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->vma) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
- intel_unpin_fb_vma(ifbdev->vma);
+ intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index fcfc217e754e..3a8d3d06c26a 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -79,6 +79,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
spin_unlock(&dev_priv->fb_tracking.lock);
}
+ might_sleep();
intel_psr_invalidate(dev_priv, frontbuffer_bits);
intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
@@ -108,6 +109,7 @@ static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
if (!frontbuffer_bits)
return;
+ might_sleep();
intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
intel_psr_flush(dev_priv, frontbuffer_bits, origin);
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 21140ccd7a97..ff08ea0ebf49 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -370,7 +370,7 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
u32 action[2];
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,bxt */
+ /* WaRsDisableCoarsePowerGating:skl,cnl */
if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
action[1] = 0;
else
@@ -403,22 +403,15 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
/**
* intel_guc_suspend() - notify GuC entering suspend state
- * @dev_priv: i915 device private
+ * @guc: the guc
*/
-int intel_guc_suspend(struct drm_i915_private *dev_priv)
+int intel_guc_suspend(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- gen9_disable_guc_interrupts(dev_priv);
-
- data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
- /* any value greater than GUC_POWER_D0 */
- data[1] = GUC_POWER_D1;
- data[2] = guc_ggtt_offset(guc->shared_data);
+ u32 data[] = {
+ INTEL_GUC_ACTION_ENTER_S_STATE,
+ GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
+ guc_ggtt_offset(guc->shared_data)
+ };
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
@@ -448,22 +441,15 @@ int intel_guc_reset_engine(struct intel_guc *guc,
/**
* intel_guc_resume() - notify GuC resuming from suspend state
- * @dev_priv: i915 device private
+ * @guc: the guc
*/
-int intel_guc_resume(struct drm_i915_private *dev_priv)
+int intel_guc_resume(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- if (i915_modparams.guc_log_level)
- gen9_enable_guc_interrupts(dev_priv);
-
- data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
- data[1] = GUC_POWER_D0;
- data[2] = guc_ggtt_offset(guc->shared_data);
+ u32 data[] = {
+ INTEL_GUC_ACTION_EXIT_S_STATE,
+ GUC_POWER_D0,
+ guc_ggtt_offset(guc->shared_data)
+ };
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
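With the entry points now taking the GuC itself, a hypothetical call-site sketch (the surrounding suspend path and its error handling are assumptions, not shown in this hunk):

	struct intel_guc *guc = &dev_priv->guc;
	int err;

	err = intel_guc_suspend(guc);	/* was: intel_guc_suspend(dev_priv) */
	if (err)
		DRM_DEBUG_DRIVER("GuC suspend failed (%d)\n", err);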
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 52856a97477d..b9424ac644ac 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -127,8 +127,8 @@ int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
-int intel_guc_suspend(struct drm_i915_private *dev_priv);
-int intel_guc_resume(struct drm_i915_private *dev_priv);
+int intel_guc_suspend(struct intel_guc *guc);
+int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 3b0932942857..d07f2b985f1c 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -269,15 +269,15 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
}
/**
- * intel_guc_fw_upload() - finish preparing the GuC for activity
+ * intel_guc_fw_upload() - load GuC uCode to device
* @guc: intel_guc structure
*
- * Called during driver loading and also after a GPU reset.
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
*
- * The main action required here it to load the GuC uCode into the device.
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_init(), so here we need only check that
- * worked, and then transfer the image to the h/w.
+ * earlier call to intel_uc_init_fw(), so here we need only check that the
+ * fetch succeeded, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 7b5074e2120c..c0c2e7d1c7d7 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -61,8 +61,10 @@ static int guc_log_flush(struct intel_guc *guc)
static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
{
union guc_log_control control_val = {
- .logging_enabled = enable,
- .verbosity = verbosity,
+ {
+ .logging_enabled = enable,
+ .verbosity = verbosity,
+ },
};
u32 action[] = {
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 1f3a8786bbdc..8a8ad2fe158d 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -26,8 +26,14 @@
#include <trace/events/dma_fence.h>
#include "intel_guc_submission.h"
+#include "intel_lrc_reg.h"
#include "i915_drv.h"
+#define GUC_PREEMPT_FINISHED 0x1
+#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
+#define GUC_PREEMPT_BREADCRUMB_BYTES \
+ (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
+
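For orientation, the arithmetic behind these constants as used further down, where the work-queue tail handed to guc_wq_item_append() is expressed in u64 units:

	/* 8 dwords * sizeof(u32) = 32 bytes; 32 / sizeof(u64) = 4 qwords. */
	BUILD_BUG_ON(GUC_PREEMPT_BREADCRUMB_BYTES != 32);
	BUILD_BUG_ON(GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64) != 4);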
/**
* DOC: GuC-based command submission
*
@@ -75,6 +81,11 @@
*
*/
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
static inline bool is_high_priority(struct intel_guc_client *client)
{
return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
@@ -496,8 +507,7 @@ static void guc_ring_doorbell(struct intel_guc_client *client)
GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
-static void guc_add_request(struct intel_guc *guc,
- struct drm_i915_gem_request *rq)
+static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
@@ -531,8 +541,6 @@ static void flush_ggtt_writes(struct i915_vma *vma)
POSTING_READ_FW(GUC_STATUS);
}
-#define GUC_PREEMPT_FINISHED 0x1
-#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
static void inject_preempt_context(struct work_struct *work)
{
struct guc_preempt_work *preempt_work =
@@ -542,37 +550,17 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_ring *ring = client->owner->engine[engine->id].ring;
u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
engine));
- u32 *cs = ring->vaddr + ring->tail;
u32 data[7];
- if (engine->id == RCS) {
- cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED,
- intel_hws_preempt_done_address(engine));
- } else {
- cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED,
- intel_hws_preempt_done_address(engine));
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- GEM_BUG_ON(!IS_ALIGNED(ring->size,
- GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)));
- GEM_BUG_ON((void *)cs - (ring->vaddr + ring->tail) !=
- GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32));
-
- ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32);
- ring->tail &= (ring->size - 1);
-
- flush_ggtt_writes(ring->vma);
-
+ /*
+ * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
+ * See guc_fill_preempt_context().
+ */
spin_lock_irq(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
- ring->tail / sizeof(u64), 0);
+ GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
spin_unlock_irq(&client->wq_lock);
/*
@@ -648,7 +636,7 @@ static void guc_submit(struct intel_engine_cs *engine)
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int count;
rq = port_unpack(&port[n], &count);
@@ -662,19 +650,18 @@ static void guc_submit(struct intel_engine_cs *engine)
}
}
-static void port_assign(struct execlist_port *port,
- struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(port_isset(port));
- port_set(port, i915_gem_request_get(rq));
+ port_set(port, i915_request_get(rq));
}
static void guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *last = NULL;
+ struct i915_request *last = NULL;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
bool submit = false;
@@ -684,15 +671,12 @@ static void guc_dequeue(struct intel_engine_cs *engine)
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
- if (!rb)
- goto unlock;
-
if (port_isset(port)) {
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+ if (engine->i915->preempt_context) {
struct guc_preempt_work *preempt_work =
&engine->i915->guc.preempt_work[engine->id];
- if (rb_entry(rb, struct i915_priolist, node)->priority >
+ if (execlists->queue_priority >
max(port_request(port)->priotree.priority, 0)) {
execlists_set_active(execlists,
EXECLISTS_ACTIVE_PREEMPT);
@@ -708,9 +692,9 @@ static void guc_dequeue(struct intel_engine_cs *engine)
}
GEM_BUG_ON(port_isset(port));
- do {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- struct drm_i915_gem_request *rq, *rn;
+ while (rb) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
@@ -727,9 +711,8 @@ static void guc_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&rq->priotree.link);
- __i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq,
- port_index(port, execlists));
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
@@ -739,14 +722,21 @@ static void guc_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
- } while (rb);
+ }
done:
+ execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
execlists->first = rb;
if (submit) {
port_assign(port, last);
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
guc_submit(engine);
}
+
+ /* We must always keep the beast fed if we have work piled up */
+ GEM_BUG_ON(port_isset(execlists->port) &&
+ !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+ GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+
unlock:
spin_unlock_irq(&engine->timeline->lock);
}
@@ -756,12 +746,12 @@ static void guc_submission_tasklet(unsigned long data)
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ while (rq && i915_request_completed(rq)) {
+ trace_i915_request_out(rq);
+ i915_request_put(rq);
execlists_port_complete(execlists, port);
@@ -832,10 +822,12 @@ static int guc_clients_doorbell_init(struct intel_guc *guc)
if (ret)
return ret;
- ret = create_doorbell(guc->preempt_client);
- if (ret) {
- destroy_doorbell(guc->execbuf_client);
- return ret;
+ if (guc->preempt_client) {
+ ret = create_doorbell(guc->preempt_client);
+ if (ret) {
+ destroy_doorbell(guc->execbuf_client);
+ return ret;
+ }
}
return 0;
@@ -848,8 +840,11 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
* Instead of trying (in vain) to communicate with it, let's just
* cleanup the doorbell HW and our internal state.
*/
- __destroy_doorbell(guc->preempt_client);
- __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID);
+ if (guc->preempt_client) {
+ __destroy_doorbell(guc->preempt_client);
+ __update_doorbell_desc(guc->preempt_client,
+ GUC_DOORBELL_INVALID);
+ }
__destroy_doorbell(guc->execbuf_client);
__update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
}
@@ -961,6 +956,62 @@ static void guc_client_free(struct intel_guc_client *client)
kfree(client);
}
+static inline bool ctx_save_restore_disabled(struct intel_context *ce)
+{
+ u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
+
+#define SR_DISABLED \
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
+ CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
+
+ return (sr & SR_DISABLED) == SR_DISABLED;
+
+#undef SR_DISABLED
+}
+
+static void guc_fill_preempt_context(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_guc_client *client = guc->preempt_client;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = &client->owner->engine[id];
+ u32 addr = intel_hws_preempt_done_address(engine);
+ u32 *cs;
+
+ GEM_BUG_ON(!ce->pin_count);
+
+ /*
+ * We rely on this context image *not* being saved after
+ * preemption. This ensures that the RING_HEAD / RING_TAIL
+ * remain pointing at initial values forever.
+ */
+ GEM_BUG_ON(!ctx_save_restore_disabled(ce));
+
+ cs = ce->ring->vaddr;
+ if (id == RCS) {
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ GUC_PREEMPT_FINISHED,
+ addr);
+ } else {
+ cs = gen8_emit_ggtt_write(cs,
+ GUC_PREEMPT_FINISHED,
+ addr);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
+ GUC_PREEMPT_BREADCRUMB_BYTES);
+
+ flush_ggtt_writes(ce->ring->vma);
+ }
+}
+
static int guc_clients_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -979,17 +1030,21 @@ static int guc_clients_create(struct intel_guc *guc)
}
guc->execbuf_client = client;
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CLIENT_PRIORITY_KMD_HIGH,
- dev_priv->preempt_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for preemption!\n");
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
- return PTR_ERR(client);
+ if (dev_priv->preempt_context) {
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_HIGH,
+ dev_priv->preempt_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for preemption!\n");
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
+ return PTR_ERR(client);
+ }
+ guc->preempt_client = client;
+
+ guc_fill_preempt_context(guc);
}
- guc->preempt_client = client;
return 0;
}
@@ -998,10 +1053,11 @@ static void guc_clients_destroy(struct intel_guc *guc)
{
struct intel_guc_client *client;
- client = fetch_and_zero(&guc->execbuf_client);
- guc_client_free(client);
-
client = fetch_and_zero(&guc->preempt_client);
+ if (client)
+ guc_client_free(client);
+
+ client = fetch_and_zero(&guc->execbuf_client);
guc_client_free(client);
}
@@ -1160,7 +1216,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
guc_reset_wq(guc->execbuf_client);
- guc_reset_wq(guc->preempt_client);
+ if (guc->preempt_client)
+ guc_reset_wq(guc->preempt_client);
err = intel_guc_sample_forcewake(guc);
if (err)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f5d7bfb43006..1baef4ac7ecb 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2383,6 +2383,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
+ intel_encoder->hotplug = intel_encoder_hotplug;
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index fe28c1ea84a5..0e3d3e89d66a 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -274,24 +274,26 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_runtime_pm_put(dev_priv);
}
-static bool intel_hpd_irq_event(struct drm_device *dev,
- struct drm_connector *connector)
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- old_status = connector->status;
+ old_status = connector->base.status;
- connector->status = drm_helper_probe_detect(connector, NULL, false);
+ connector->base.status =
+ drm_helper_probe_detect(&connector->base, NULL, false);
- if (old_status == connector->status)
+ if (old_status == connector->base.status)
return false;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
+ connector->base.base.id,
+ connector->base.name,
drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
+ drm_get_connector_status_name(connector->base.status));
return true;
}
@@ -381,10 +383,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
- if (intel_encoder->hot_plug)
- intel_encoder->hot_plug(intel_encoder);
- if (intel_hpd_irq_event(dev, connector))
- changed = true;
+
+ changed |= intel_encoder->hotplug(intel_encoder,
+ intel_connector);
}
}
drm_connector_list_iter_end(&conn_iter);
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 8ed05182f944..65e2afb9b955 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -27,160 +27,9 @@
#include "intel_huc.h"
#include "i915_drv.h"
-/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 07
-#define BXT_BLD_NUM 1398
-
-#define SKL_HUC_FW_MAJOR 01
-#define SKL_HUC_FW_MINOR 07
-#define SKL_BLD_NUM 1398
-
-#define KBL_HUC_FW_MAJOR 02
-#define KBL_HUC_FW_MINOR 00
-#define KBL_BLD_NUM 1810
-
-#define HUC_FW_PATH(platform, major, minor, bld_num) \
- "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
- __stringify(minor) "_" __stringify(bld_num) ".bin"
-
-#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
- SKL_HUC_FW_MINOR, SKL_BLD_NUM)
-MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
-
-#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
- BXT_HUC_FW_MINOR, BXT_BLD_NUM)
-MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
-
-#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
- KBL_HUC_FW_MINOR, KBL_BLD_NUM)
-MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
-
-static void huc_fw_select(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- if (!HAS_HUC(dev_priv))
- return;
-
- if (i915_modparams.huc_firmware_path) {
- huc_fw->path = i915_modparams.huc_firmware_path;
- huc_fw->major_ver_wanted = 0;
- huc_fw->minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc_fw->path = I915_SKL_HUC_UCODE;
- huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc_fw->path = I915_BXT_HUC_UCODE;
- huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc_fw->path = I915_KBL_HUC_UCODE;
- huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else {
- DRM_WARN("%s: No firmware known for this platform!\n",
- intel_uc_fw_type_repr(huc_fw->type));
- }
-}
-
-/**
- * intel_huc_init_early() - initializes HuC struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC selects firmware for uploading
- */
void intel_huc_init_early(struct intel_huc *huc)
{
- struct intel_uc_fw *huc_fw = &huc->fw;
-
- intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
- huc_fw_select(huc_fw);
-}
-
-/**
- * huc_ucode_xfer() - DMA's the firmware
- * @dev_priv: the drm_i915_private device
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- unsigned long offset = 0;
- u32 size;
- int ret;
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- /* Set the source address for the uCode */
- offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
- /* Hardware doesn't look at destination address for HuC. Set it to 0,
- * but still program the correct address space.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- size = huc_fw->header_size + huc_fw->ucode_size;
- I915_WRITE(DMA_COPY_SIZE, size);
-
- /* Start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
-
- /* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
-
- DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
-
- /* Disable the bits once DMA is over */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * intel_huc_init_hw() - load HuC uCode to device
- * @huc: intel_huc structure
- *
- * Called from intel_uc_init_hw() during driver loading and also after a GPU
- * reset. Be note that HuC loading must be done before GuC loading.
- *
- * The firmware image should have already been fetched into memory by the
- * earlier call to intel_uc_init_fw(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
- *
- */
-int intel_huc_init_hw(struct intel_huc *huc)
-{
- return intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
+ intel_huc_fw_init_early(huc);
}
/**
@@ -199,6 +48,7 @@ int intel_huc_auth(struct intel_huc *huc)
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
struct i915_vma *vma;
+ u32 status;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@@ -209,28 +59,35 @@ int intel_huc_auth(struct intel_huc *huc)
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
- return ret;
+ goto fail;
}
ret = intel_guc_auth_huc(guc,
guc_ggtt_offset(vma) + huc->fw.rsa_offset);
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
- goto out;
+ goto fail_unpin;
}
/* Check authentication status, it should be done by now */
- ret = intel_wait_for_register(i915,
- HUC_STATUS2,
- HUC_FW_VERIFIED,
- HUC_FW_VERIFIED,
- 50);
+ ret = __intel_wait_for_register(i915,
+ HUC_STATUS2,
+ HUC_FW_VERIFIED,
+ HUC_FW_VERIFIED,
+ 2, 50, &status);
if (ret) {
- DRM_ERROR("HuC: Authentication failed %d\n", ret);
- goto out;
+ DRM_ERROR("HuC: Firmware not verified %#x\n", status);
+ goto fail_unpin;
}
-out:
i915_vma_unpin(vma);
+ return 0;
+
+fail_unpin:
+ i915_vma_unpin(vma);
+fail:
+ huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
+
+ DRM_ERROR("HuC: Authentication failed %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index 40039db59e04..5d6e804f9771 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -26,6 +26,7 @@
#define _INTEL_HUC_H_
#include "intel_uc_fw.h"
+#include "intel_huc_fw.h"
struct intel_huc {
/* Generic uC firmware management */
@@ -35,7 +36,6 @@ struct intel_huc {
};
void intel_huc_init_early(struct intel_huc *huc);
-int intel_huc_init_hw(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
new file mode 100644
index 000000000000..c66afa9b989a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -0,0 +1,166 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "intel_huc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as the GuC. However, the actual
+ * loading to HW is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
+#define BXT_HUC_FW_MAJOR 01
+#define BXT_HUC_FW_MINOR 07
+#define BXT_BLD_NUM 1398
+
+#define SKL_HUC_FW_MAJOR 01
+#define SKL_HUC_FW_MINOR 07
+#define SKL_BLD_NUM 1398
+
+#define KBL_HUC_FW_MAJOR 02
+#define KBL_HUC_FW_MINOR 00
+#define KBL_BLD_NUM 1810
+
+#define HUC_FW_PATH(platform, major, minor, bld_num) \
+ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
+ __stringify(minor) "_" __stringify(bld_num) ".bin"
+
+#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
+ SKL_HUC_FW_MINOR, SKL_BLD_NUM)
+MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
+
+#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
+ BXT_HUC_FW_MINOR, BXT_BLD_NUM)
+MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
+
+#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
+ KBL_HUC_FW_MINOR, KBL_BLD_NUM)
+MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+
+static void huc_fw_select(struct intel_uc_fw *huc_fw)
+{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+ if (!HAS_HUC(dev_priv))
+ return;
+
+ if (i915_modparams.huc_firmware_path) {
+ huc_fw->path = i915_modparams.huc_firmware_path;
+ huc_fw->major_ver_wanted = 0;
+ huc_fw->minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ huc_fw->path = I915_SKL_HUC_UCODE;
+ huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ huc_fw->path = I915_BXT_HUC_UCODE;
+ huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ huc_fw->path = I915_KBL_HUC_UCODE;
+ huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else {
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(huc_fw->type));
+ }
+}
+
+/**
+ * intel_huc_fw_init_early() - initializes HuC firmware struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with HuC, selects the firmware to be uploaded
+ */
+void intel_huc_fw_init_early(struct intel_huc *huc)
+{
+ struct intel_uc_fw *huc_fw = &huc->fw;
+
+ intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+ huc_fw_select(huc_fw);
+}
+
+/**
+ * huc_fw_xfer() - DMA's the firmware
+ * @huc_fw: the firmware descriptor
+ * @vma: the firmware image (bound into the GGTT)
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
+{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ unsigned long offset = 0;
+ u32 size;
+ int ret;
+
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Set the source address for the uCode */
+ offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ * but still program the correct address space.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ size = huc_fw->header_size + huc_fw->ucode_size;
+ I915_WRITE(DMA_COPY_SIZE, size);
+
+ /* Start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
+
+ DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
+
+ /* Disable the bits once DMA is over */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/**
+ * intel_huc_fw_upload() - load HuC uCode to device
+ * @huc: intel_huc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset. Note that HuC must be loaded before GuC.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_uc_init_fw(), so here we need only check that the
+ * fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_fw_upload(struct intel_huc *huc)
+{
+ return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
+}
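A hedged sketch of the ordering implied by the kernel-doc above; intel_uc_init_hw() itself is not part of this hunk, so treat the surrounding flow as an assumption:

	/* Upload HuC before GuC; authenticate via the GuC once it is running. */
	err = intel_huc_fw_upload(huc);
	if (err)
		return err;

	/* ... GuC firmware upload happens here ... */

	err = intel_huc_auth(huc);	/* polls HUC_STATUS2 for HUC_FW_VERIFIED */
	if (err)
		return err;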
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/intel_huc_fw.h
new file mode 100644
index 000000000000..8a00a0ebddc5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc_fw.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_HUC_FW_H_
+#define _INTEL_HUC_FW_H_
+
+struct intel_huc;
+
+void intel_huc_fw_init_early(struct intel_huc *huc);
+int intel_huc_fw_upload(struct intel_huc *huc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 5809b29044fc..6269750e2b54 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -74,7 +74,6 @@
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
{
- int ret;
struct drm_device *dev = &dev_priv->drm;
struct platform_device_info pinfo = {};
struct resource *rsc;
@@ -119,24 +118,19 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
spin_lock_init(&pdata->lpe_audio_slock);
platdev = platform_device_register_full(&pinfo);
+ kfree(rsc);
+ kfree(pdata);
+
if (IS_ERR(platdev)) {
- ret = PTR_ERR(platdev);
DRM_ERROR("Failed to allocate LPE audio platform device\n");
- goto err;
+ return platdev;
}
- kfree(rsc);
-
pm_runtime_forbid(&platdev->dev);
pm_runtime_set_active(&platdev->dev);
pm_runtime_enable(&platdev->dev);
return platdev;
-
-err:
- kfree(rsc);
- kfree(pdata);
- return ERR_PTR(ret);
}
static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 380c0838d8b3..3a69b367e565 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -161,7 +161,6 @@
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define PREEMPT_ID 0x1
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
@@ -170,6 +169,23 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_engine_cs *engine,
struct intel_ring *ring);
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static inline int rq_prio(const struct i915_request *rq)
+{
+ return rq->priotree.priority;
+}
+
+static inline bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *last,
+ int prio)
+{
+ return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
+}
+
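A short worked reading of the new helper above, with illustrative priorities only:

	/*
	 * need_preempt() fires only when prio > max(rq_prio(last), 0):
	 *   running prio   2, queue_priority  3 ->  3 > 2      -> preempt
	 *   running prio -10, queue_priority  1 ->  1 > 0      -> preempt
	 *   running prio -10, queue_priority -5 -> -5 > 0 false -> no preempt
	 *   empty queue: queue_priority == INT_MIN -> never preempts
	 */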
/**
* intel_lr_context_descriptor_update() - calculate & cache the descriptor
* descriptor for a pinned context
@@ -188,6 +204,18 @@ static void execlists_init_reg_state(u32 *reg_state,
* bits 32-52: ctx ID, a globally unique tag
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ * bits 48-53: engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
@@ -196,12 +224,32 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_context *ce = &ctx->engine[engine->id];
u64 desc;
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
+ BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
desc = ctx->desc_template; /* bits 0-11 */
+ GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
+
desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
/* bits 12-31 */
- desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
+ GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+
+ if (INTEL_GEN(ctx->i915) >= 11) {
+ GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
+ desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
+ /* bits 37-47 */
+
+ desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
+ /* bits 48-53 */
+
+ /* TODO: decide what to do with SW counter (bits 55-60) */
+
+ desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
+ /* bits 61-63 */
+ } else {
+ GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
+ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
+ }
ce->lrc_desc = desc;
}
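A small numeric consequence of the two layouts documented above, derived purely from the bit ranges (the BUILD_BUG_ON()s in this function enforce the same limits):

	/*
	 * Gen8..Gen10: ctx ID occupies bits 32-52 -> 21 bits -> up to 2^21
	 * (2097152) hw ids. Gen11: SW context ID occupies bits 37-47 ->
	 * 11 bits -> up to 2^11 (2048) hw ids, so ctx->hw_id must stay
	 * below BIT(GEN11_SW_CTX_ID_WIDTH).
	 */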
@@ -225,7 +273,7 @@ find_priolist:
parent = &execlists->queue.rb_node;
while (*parent) {
rb = *parent;
- p = rb_entry(rb, typeof(*p), node);
+ p = to_priolist(rb);
if (prio > p->priority) {
parent = &rb->rb_left;
} else if (prio < p->priority) {
@@ -265,10 +313,10 @@ find_priolist:
if (first)
execlists->first = &p->node;
- return ptr_pack_bits(p, first, 1);
+ return p;
}
-static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -276,7 +324,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
struct i915_priolist *uninitialized_var(p);
int last_prio = I915_PRIORITY_INVALID;
@@ -285,20 +333,16 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline->requests,
link) {
- if (i915_gem_request_completed(rq))
+ if (i915_request_completed(rq))
return;
- __i915_gem_request_unsubmit(rq);
+ __i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
- if (rq->priotree.priority != last_prio) {
- p = lookup_priolist(engine,
- &rq->priotree,
- rq->priotree.priority);
- p = ptr_mask_bits(p, 1);
-
- last_prio = rq->priotree.priority;
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != last_prio) {
+ last_prio = rq_prio(rq);
+ p = lookup_priolist(engine, &rq->priotree, last_prio);
}
list_add(&rq->priotree.link, &p->requests);
@@ -317,8 +361,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
}
static inline void
-execlists_context_status_change(struct drm_i915_gem_request *rq,
- unsigned long status)
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
/*
* Only used when GVT-g is enabled now. When GVT-g is disabled,
@@ -332,14 +375,14 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
}
static inline void
-execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+execlists_context_schedule_in(struct i915_request *rq)
{
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
}
static inline void
-execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+execlists_context_schedule_out(struct i915_request *rq)
{
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
@@ -354,7 +397,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
-static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
struct i915_hw_ppgtt *ppgtt =
@@ -374,19 +417,31 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
return ce->lrc_desc;
}
-static inline void elsp_write(u64 desc, u32 __iomem *elsp)
+static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
- writel(upper_32_bits(desc), elsp);
- writel(lower_32_bits(desc), elsp);
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
}
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlists.port;
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
unsigned int n;
- for (n = execlists_num_ports(&engine->execlists); n--; ) {
- struct drm_i915_gem_request *rq;
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries, keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq;
unsigned int count;
u64 desc;
@@ -399,18 +454,24 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x, prio=%d\n",
engine->name, n,
port[n].context_id, count,
- rq->global_seqno);
+ rq->global_seqno,
+ rq_prio(rq));
} else {
GEM_BUG_ON(!n);
desc = 0;
}
- elsp_write(desc, engine->execlists.elsp);
+ write_desc(execlists, desc, n);
}
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
@@ -431,43 +492,47 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
return true;
}
-static void port_assign(struct execlist_port *port,
- struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
if (port_isset(port))
- i915_gem_request_put(port_request(port));
+ i915_request_put(port_request(port));
- port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
+ port_set(port, port_pack(i915_request_get(rq), port_count(port)));
}
static void inject_preempt_context(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists *execlists = &engine->execlists;
struct intel_context *ce =
&engine->i915->preempt_context->engine[engine->id];
unsigned int n;
- GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
- GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
-
- memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
- ce->ring->tail += WA_TAIL_BYTES;
- ce->ring->tail &= (ce->ring->size - 1);
- ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
-
+ GEM_BUG_ON(execlists->preempt_complete_status !=
+ upper_32_bits(ce->lrc_desc));
GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
+ /*
+ * Switch to our empty preempt context so
+ * the state of the GPU is known (idle).
+ */
GEM_TRACE("%s\n", engine->name);
- for (n = execlists_num_ports(&engine->execlists); --n; )
- elsp_write(0, engine->execlists.elsp);
+ for (n = execlists_num_ports(execlists); --n; )
+ write_desc(execlists, 0, n);
+
+ write_desc(execlists, ce->lrc_desc, n);
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
- elsp_write(ce->lrc_desc, engine->execlists.elsp);
execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -476,7 +541,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct execlist_port *port = execlists->port;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *last = port_request(port);
+ struct i915_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
@@ -504,8 +569,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
spin_lock_irq(&engine->timeline->lock);
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
- if (!rb)
- goto unlock;
if (last) {
/*
@@ -528,55 +591,49 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
goto unlock;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
- rb_entry(rb, struct i915_priolist, node)->priority >
- max(last->priotree.priority, 0)) {
- /*
- * Switch to our empty preempt context so
- * the state of the GPU is known (idle).
- */
+ if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
goto unlock;
- } else {
- /*
- * In theory, we could coalesce more requests onto
- * the second port (the first port is active, with
- * no preemptions pending). However, that means we
- * then have to deal with the possible lite-restore
- * of the second port (as we submit the ELSP, there
- * may be a context-switch) but also we may complete
- * the resubmission before the context-switch. Ergo,
- * coalescing onto the second port will cause a
- * preemption event, but we cannot predict whether
- * that will affect port[0] or port[1].
- *
- * If the second port is already active, we can wait
- * until the next context-switch before contemplating
- * new requests. The GPU will be busy and we should be
- * able to resubmit the new ELSP before it idles,
- * avoiding pipeline bubbles (momentary pauses where
- * the driver is unable to keep up the supply of new
- * work).
- */
- if (port_count(&port[1]))
- goto unlock;
-
- /* WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent
- * ring:HEAD == req:TAIL as we resubmit the
- * request. See gen8_emit_breadcrumb() for
- * where we prepare the padding after the
- * end of the request.
- */
- last->tail = last->wa_tail;
}
+
+ /*
+ * In theory, we could coalesce more requests onto
+ * the second port (the first port is active, with
+ * no preemptions pending). However, that means we
+ * then have to deal with the possible lite-restore
+ * of the second port (as we submit the ELSP, there
+ * may be a context-switch) but also we may complete
+ * the resubmission before the context-switch. Ergo,
+ * coalescing onto the second port will cause a
+ * preemption event, but we cannot predict whether
+ * that will affect port[0] or port[1].
+ *
+ * If the second port is already active, we can wait
+ * until the next context-switch before contemplating
+ * new requests. The GPU will be busy and we should be
+ * able to resubmit the new ELSP before it idles,
+ * avoiding pipeline bubbles (momentary pauses where
+ * the driver is unable to keep up the supply of new
+ * work). However, we have to double check that the
+ * priorities of the ports haven't been switched.
+ */
+ if (port_count(&port[1]))
+ goto unlock;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent
+ * ring:HEAD == rq:TAIL as we resubmit the
+ * request. See gen8_emit_breadcrumb() for
+ * where we prepare the padding after the
+ * end of the request.
+ */
+ last->tail = last->wa_tail;
}
- do {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- struct drm_i915_gem_request *rq, *rn;
+ while (rb) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
/*
@@ -626,8 +683,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
INIT_LIST_HEAD(&rq->priotree.link);
- __i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, execlists));
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
@@ -637,11 +694,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
- } while (rb);
+ }
done:
+ execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
execlists->first = rb;
if (submit)
port_assign(port, last);
+
+ /* We must always keep the beast fed if we have work piled up */
+ GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+
unlock:
spin_unlock_irq(&engine->timeline->lock);
@@ -649,6 +711,9 @@ unlock:
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
execlists_submit_ports(engine);
}
+
+ GEM_BUG_ON(port_isset(execlists->port) &&
+ !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
}
void
@@ -658,12 +723,17 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
unsigned int num_ports = execlists_num_ports(execlists);
while (num_ports-- && port_isset(port)) {
- struct drm_i915_gem_request *rq = port_request(port);
+ struct i915_request *rq = port_request(port);
GEM_BUG_ON(!execlists->active);
intel_engine_context_out(rq->engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
- i915_gem_request_put(rq);
+
+ execlists_context_status_change(rq,
+ i915_request_completed(rq) ?
+ INTEL_CONTEXT_SCHEDULE_OUT :
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+
+ i915_request_put(rq);
memset(port, 0, sizeof(*port));
port++;
@@ -673,32 +743,50 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally extend its
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ local_irq_save(flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
+ spin_lock(&engine->timeline->lock);
+
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_gem_request_completed(rq))
+ if (!i915_request_completed(rq))
dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
rb = execlists->first;
while (rb) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ struct i915_priolist *p = to_priolist(rb);
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
INIT_LIST_HEAD(&rq->priotree.link);
dma_fence_set_error(&rq->fence, -EIO);
- __i915_gem_request_submit(rq);
+ __i915_request_submit(rq);
}
rb = rb_next(rb);
@@ -710,11 +798,13 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
-
+ execlists->queue_priority = INT_MIN;
execlists->queue = RB_ROOT;
execlists->first = NULL;
GEM_BUG_ON(port_isset(execlists->port));
+ spin_unlock(&engine->timeline->lock);
+
/*
* The port is checked prior to scheduling a tasklet, but
* just in case we have suspended the tasklet to do the
@@ -723,7 +813,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ /* Mark all CS interrupts as complete */
+ execlists->active = 0;
+
+ local_irq_restore(flags);
}
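
A hedged sketch of the irq/spinlock split described in the comment above, using a pthread mutex and a fake irq flag purely as stand-ins: interrupts are disabled once around the whole cancellation, and the lock is taken bare inside that window only for the timeline manipulation, rather than folding both concerns into a single spin_lock_irqsave().

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for local_irq_save()/local_irq_restore(); in userspace we model
 * them as a flag so the control flow compiles and runs. */
static int irqs_enabled = 1;

static unsigned long fake_irq_save(void)
{
	unsigned long f = irqs_enabled;
	irqs_enabled = 0;
	return f;
}

static void fake_irq_restore(unsigned long f)
{
	irqs_enabled = (int)f;
}

static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;

static void cancel_port_requests(void)  { printf("ports cleared (irq-context state)\n"); }
static void flush_queued_requests(void) { printf("queue flushed under timeline lock\n"); }

/* Mirrors the split: one irq-off window, with the lock nested inside it
 * only for the timeline-only state. */
static void cancel_requests(void)
{
	unsigned long flags = fake_irq_save();

	cancel_port_requests();          /* state shared with hardirq context */

	pthread_mutex_lock(&timeline_lock);
	flush_queued_requests();         /* timeline-only state */
	pthread_mutex_unlock(&timeline_lock);

	fake_irq_restore(flags);
}

int main(void)
{
	cancel_requests();
	return 0;
}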
/*
@@ -799,7 +892,7 @@ static void execlists_submission_tasklet(unsigned long data)
tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
while (head != tail) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int status;
unsigned int count;
@@ -844,7 +937,7 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == PREEMPT_ID) {
+ buf[2*head + 1] == execlists->preempt_complete_status) {
GEM_TRACE("%s preempt-idle\n", engine->name);
execlists_cancel_port_requests(execlists);
@@ -865,23 +958,28 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
engine->name,
port->context_id, count,
- rq ? rq->global_seqno : 0);
+ rq ? rq->global_seqno : 0,
+ rq ? rq_prio(rq) : 0);
+
+ /* Check the context/desc id for this event matches */
+ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
GEM_BUG_ON(port_isset(&port[1]) &&
!(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!i915_gem_request_completed(rq));
+ GEM_BUG_ON(!i915_request_completed(rq));
execlists_context_schedule_out(rq);
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ trace_i915_request_out(rq);
+ i915_request_put(rq);
+
+ GEM_TRACE("%s completed ctx=%d\n",
+ engine->name, port->context_id);
execlists_port_complete(execlists, port);
} else {
@@ -910,18 +1008,22 @@ static void execlists_submission_tasklet(unsigned long data)
intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
}
-static void insert_request(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
- int prio)
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
{
- struct i915_priolist *p = lookup_priolist(engine, pt, prio);
+ list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+}
- list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
- if (ptr_unmask_bits(p, 1))
+static void submit_queue(struct intel_engine_cs *engine, int prio)
+{
+ if (prio > engine->execlists.queue_priority) {
+ engine->execlists.queue_priority = prio;
tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
}
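
A small self-contained model of the queue_priority gate introduced above (struct and field names are illustrative): the engine caches the best priority it has already asked the hardware to run, so queueing a lower-priority request does not need to kick the submission tasklet at all.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_execlists {
	int queue_priority;      /* best pending priority, INT_MIN when idle */
	bool tasklet_kicked;
};

static void submit_queue(struct fake_execlists *el, int prio)
{
	if (prio > el->queue_priority) {
		el->queue_priority = prio;
		el->tasklet_kicked = true;   /* stands in for tasklet_hi_schedule() */
	}
}

int main(void)
{
	struct fake_execlists el = { .queue_priority = INT_MIN };

	submit_queue(&el, 0);    /* first request: kick the tasklet */
	printf("kicked=%d prio=%d\n", el.tasklet_kicked, el.queue_priority);

	el.tasklet_kicked = false;
	submit_queue(&el, -10);  /* lower priority: no kick needed */
	printf("kicked=%d prio=%d\n", el.tasklet_kicked, el.queue_priority);
	return 0;
}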
-static void execlists_submit_request(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
@@ -929,7 +1031,8 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- insert_request(engine, &request->priotree, request->priotree.priority);
+ queue_request(engine, &request->priotree, rq_prio(request));
+ submit_queue(engine, rq_prio(request));
GEM_BUG_ON(!engine->execlists.first);
GEM_BUG_ON(list_empty(&request->priotree.link));
@@ -937,9 +1040,9 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *pt_to_request(struct i915_priotree *pt)
{
- return container_of(pt, struct drm_i915_gem_request, priotree);
+ return container_of(pt, struct i915_request, priotree);
}
static struct intel_engine_cs *
@@ -957,7 +1060,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
return engine;
}
-static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+static void execlists_schedule(struct i915_request *request, int prio)
{
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
@@ -966,7 +1069,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (i915_gem_request_completed(request))
+ if (i915_request_completed(request))
return;
if (prio <= READ_ONCE(request->priotree.priority))
@@ -985,7 +1088,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
* static void update_priorities(struct i915_priotree *pt, prio) {
* list_for_each_entry(dep, &pt->signalers_list, signal_link)
* update_priorities(dep->signal, prio)
- * insert_request(pt);
+ * queue_request(pt);
* }
* but that may have unlimited recursion depth and so runs a very
* real risk of overunning the kernel stack. Instead, we build
@@ -1048,8 +1151,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
pt->priority = prio;
if (!list_empty(&pt->link)) {
__list_del_entry(&pt->link);
- insert_request(engine, pt, prio);
+ queue_request(engine, pt, prio);
}
+ submit_queue(engine, prio);
}
spin_unlock_irq(&engine->timeline->lock);
@@ -1151,7 +1255,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
i915_gem_context_put(ctx);
}
-static int execlists_request_alloc(struct drm_i915_gem_request *request)
+static int execlists_request_alloc(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
@@ -1583,7 +1687,7 @@ static void reset_irq(struct intel_engine_cs *engine)
}
static void reset_common_ring(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
@@ -1592,9 +1696,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
GEM_TRACE("%s seqno=%x\n",
engine->name, request ? request->global_seqno : 0);
- reset_irq(engine);
+ /* See execlists_cancel_requests() for the irq/spinlock split. */
+ local_irq_save(flags);
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ reset_irq(engine);
/*
* Catch up with any missed context-switch interrupts.
@@ -1608,14 +1713,17 @@ static void reset_common_ring(struct intel_engine_cs *engine,
execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
+ spin_lock(&engine->timeline->lock);
__unwind_incomplete_requests(engine);
-
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock(&engine->timeline->lock);
/* Mark all CS interrupts as complete */
execlists->active = 0;
- /* If the request was innocent, we leave the request in the ELSP
+ local_irq_restore(flags);
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
* have been corrupted by the reset, in which case we may have
* to service a new GPU hang, but more likely we can continue on
@@ -1628,7 +1736,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
if (!request || request->fence.error != -EIO)
return;
- /* We want a simple context + ring to execute the breadcrumb update.
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
* so clear it and rebuild just what we need for the breadcrumb.
* All pending requests for this context will be zapped, and any
@@ -1651,15 +1760,15 @@ static void reset_common_ring(struct intel_engine_cs *engine,
unwind_wa_tail(request);
}
-static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_engine_cs *engine = req->engine;
+ struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
int i;
- cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+ cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1674,12 +1783,12 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
+static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
@@ -1692,18 +1801,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
 * not needed in 48-bit. */
- if (req->ctx->ppgtt &&
- (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
- !intel_vgpu_active(req->i915)) {
- ret = intel_logical_ring_emit_pdps(req);
+ if (rq->ctx->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ !intel_vgpu_active(rq->i915)) {
+ ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
+ rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1732,7 +1841,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1751,7 +1860,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
-static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
+static int gen8_emit_flush(struct i915_request *request, u32 mode)
{
u32 cmd, *cs;
@@ -1783,7 +1892,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
return 0;
}
-static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
struct intel_engine_cs *engine = request->engine;
@@ -1858,7 +1967,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
{
/* Ensure there's always at least one preemption point per-request. */
*cs++ = MI_ARB_CHECK;
@@ -1866,7 +1975,7 @@ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
request->wa_tail = intel_ring_offset(request, cs);
}
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
@@ -1882,8 +1991,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
-static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
- u32 *cs)
+static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1899,15 +2007,15 @@ static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
}
static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
-static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
+static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(rq);
if (ret)
return ret;
- ret = intel_rcs_context_init_mocs(req);
+ ret = intel_rcs_context_init_mocs(rq);
/*
* Failing to program the MOCS is non-fatal. The system will not
* run at peak performance. So generate an error and carry on.
@@ -1915,7 +2023,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_emit(req);
+ return i915_gem_render_state_emit(rq);
}
/**
@@ -1963,6 +2071,12 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+
+ engine->i915->caps.scheduler =
+ I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY;
+ if (engine->i915->preempt_context)
+ engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
static void
@@ -1983,8 +2097,17 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->set_default_submission = execlists_set_default_submission;
- engine->irq_enable = gen8_logical_ring_enable_irq;
- engine->irq_disable = gen8_logical_ring_disable_irq;
+ if (INTEL_GEN(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+ /*
+ * TODO: On Gen11 interrupt masks need to be clear
+ * to allow C6 entry. Keep interrupts enabled and
+ * take the hit of generating extra interrupts
+ * until a more refined solution exists.
+ */
+ }
engine->emit_bb_start = gen8_emit_bb_start;
}
@@ -2036,8 +2159,20 @@ static int logical_ring_init(struct intel_engine_cs *engine)
if (ret)
goto error;
- engine->execlists.elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+ if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
+ engine->execlists.submit_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
+ engine->execlists.ctrl_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+ } else {
+ engine->execlists.submit_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_ELSP(engine));
+ }
+
+ engine->execlists.preempt_complete_status = ~0u;
+ if (engine->i915->preempt_context)
+ engine->execlists.preempt_complete_status =
+ upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
return 0;
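
For illustration, a sketch of how the cached preempt_complete_status is meant to be used (helper names are hypothetical): store the upper 32 bits of the preempt context descriptor at init, defaulting to ~0u so it cannot match when preemption is unavailable, and compare CSB context dwords against it at interrupt time.

#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

struct fake_execlists {
	uint32_t preempt_complete_status;
};

static void init_preempt_status(struct fake_execlists *el,
				const uint64_t *preempt_desc)
{
	el->preempt_complete_status = ~0u;          /* never matches if unset */
	if (preempt_desc)
		el->preempt_complete_status = upper_32_bits(*preempt_desc);
}

static int csb_is_preempt_complete(const struct fake_execlists *el,
				   uint32_t csb_ctx_dword)
{
	return csb_ctx_dword == el->preempt_complete_status;
}

int main(void)
{
	uint64_t desc = 0x00000012abcd0000ull;
	struct fake_execlists el;

	init_preempt_status(&el, &desc);
	printf("match=%d\n", csb_is_preempt_complete(&el, upper_32_bits(desc)));
	return 0;
}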
@@ -2118,7 +2253,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
+ rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
@@ -2142,6 +2277,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
default:
MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
+ case 11:
+ indirect_ctx_offset =
+ GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
case 10:
indirect_ctx_offset =
GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -2301,7 +2440,7 @@ populate_lr_context(struct i915_gem_context *ctx,
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ctx->hw_id == PREEMPT_ID)
+ if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 636ced41225d..59d7b86012e9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -42,6 +42,9 @@
#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
+#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/intel_lrc_reg.h
index a53336e2fc97..169a2239d6c7 100644
--- a/drivers/gpu/drm/i915/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/intel_lrc_reg.h
@@ -63,5 +63,6 @@
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
+#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A
#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ef80499113ee..d35d2d50f595 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -189,7 +189,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->t4 = val * 1000;
- if (INTEL_INFO(dev_priv)->gen <= 4 &&
+ if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
DRM_DEBUG_KMS("Panel power timings uninitialized, "
"setting defaults\n");
@@ -268,7 +268,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
/* set the corresponsding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
temp |= pipe_config->gmch_pfit.lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
+
+ /*
+ * Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (lvds_encoder->is_dual_link)
@@ -276,7 +278,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ /*
+ * It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes. For now, let's just maintain the
* value we got from the BIOS.
@@ -284,12 +287,16 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
temp &= ~LVDS_A3_POWER_MASK;
temp |= lvds_encoder->a3_power;
- /* Set the dithering flag on LVDS as needed, note that there is no
+ /*
+ * Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
- * only controlled through the PIPECONF reg. */
+ * only controlled through the PIPECONF reg.
+ */
if (IS_GEN4(dev_priv)) {
- /* Bspec wording suggests that LVDS port dithering only exists
- * for 18bpp panels. */
+ /*
+ * Bspec wording suggests that LVDS port dithering only exists
+ * for 18bpp panels.
+ */
if (pipe_config->dither && pipe_config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
@@ -304,7 +311,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(lvds_encoder->reg, temp);
}
-/**
+/*
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
@@ -441,7 +448,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-/**
+/*
* Detect the LVDS connection.
*
* Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
@@ -464,7 +471,7 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-/**
+/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
@@ -893,7 +900,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
if (dmi_check_system(intel_dual_link_lvds))
return true;
- /* BIOS should set the proper LVDS register value at boot, but
+ /*
+ * BIOS should set the proper LVDS register value at boot, but
* in reality, it doesn't set the value when the lid is closed;
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
@@ -907,13 +915,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
- /* With the introduction of the PCH we gained a dedicated
- * LVDS presence pin, use it. */
+ /*
+ * With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it.
+ */
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
- /* Otherwise LVDS was only attached to mobile products,
- * except for the inglorious 830gm */
+ /*
+ * Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm
+ */
if (INTEL_GEN(dev_priv) <= 4 &&
IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
@@ -923,7 +935,7 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @dev: drm device
+ * @dev_priv: i915 device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index f4c46b0b8f0a..c0b34b7943b9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -187,7 +187,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
table->table = broxton_mocs_table;
result = true;
} else {
- WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+ WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
"Platform that should have a MOCS table does not.\n");
}
@@ -265,7 +265,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
/**
* emit_mocs_control_table() - emit the mocs control table
- * @req: Request to set up the MOCS table for.
+ * @rq: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
@@ -273,17 +273,17 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
*
* Return: 0 on success, otherwise the error status.
*/
-static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+static int emit_mocs_control_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
- enum intel_engine_id engine = req->engine->id;
+ enum intel_engine_id engine = rq->engine->id;
unsigned int index;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -308,7 +308,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -323,7 +323,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
/**
* emit_mocs_l3cc_table() - emit the mocs control table
- * @req: Request to set up the MOCS table for.
+ * @rq: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
@@ -332,7 +332,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
*
* Return: 0 on success, otherwise the error status.
*/
-static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+static int emit_mocs_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
unsigned int i;
@@ -341,7 +341,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -370,7 +370,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -417,7 +417,7 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
/**
* intel_rcs_context_init_mocs() - program the MOCS register.
- * @req: Request to set up the MOCS tables for.
+ * @rq: Request to set up the MOCS tables for.
*
* This function will emit a batch buffer with the values required for
* programming the MOCS register values for all the currently supported
@@ -431,19 +431,19 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
*
* Return: 0 on success, otherwise the error status.
*/
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+int intel_rcs_context_init_mocs(struct i915_request *rq)
{
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(req->i915, &t)) {
+ if (get_mocs_settings(rq->i915, &t)) {
/* Program the RCS control registers */
- ret = emit_mocs_control_table(req, &t);
+ ret = emit_mocs_control_table(rq, &t);
if (ret)
return ret;
/* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(req, &t);
+ ret = emit_mocs_l3cc_table(rq, &t);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index ce4a5dfa5f94..d1751f91c1a4 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -52,7 +52,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
int intel_mocs_init_engine(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 41e9465d44a8..36671a937fa4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -234,50 +234,50 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
}
static void intel_overlay_submit_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
i915_gem_retire_fn retire)
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
&overlay->i915->drm.struct_mutex);
- i915_gem_active_set(&overlay->last_flip, req);
- i915_add_request(req);
+ i915_gem_active_set(&overlay->last_flip, rq);
+ i915_request_add(rq);
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
i915_gem_retire_fn retire)
{
- intel_overlay_submit_request(overlay, req, retire);
+ intel_overlay_submit_request(overlay, rq, retire);
return i915_gem_active_retire(&overlay->last_flip,
&overlay->i915->drm.struct_mutex);
}
-static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
- return i915_gem_request_alloc(engine, dev_priv->kernel_context);
+ return i915_request_alloc(engine, dev_priv->kernel_context);
}
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 *cs;
WARN_ON(overlay->active);
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
@@ -290,9 +290,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
*cs++ = overlay->flip_addr | OFC_UPDATE;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- return intel_overlay_do_wait_request(overlay, req, NULL);
+ return intel_overlay_do_wait_request(overlay, rq, NULL);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -322,7 +322,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
@@ -336,23 +336,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
- intel_overlay_submit_request(overlay, req, NULL);
+ intel_overlay_submit_request(overlay, rq, NULL);
return 0;
}
@@ -373,7 +373,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}
static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -382,7 +382,7 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
}
static void intel_overlay_off_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -401,7 +401,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
@@ -412,13 +412,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
@@ -432,11 +432,11 @@ static int intel_overlay_off(struct intel_overlay *overlay)
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
- return intel_overlay_do_wait_request(overlay, req,
+ return intel_overlay_do_wait_request(overlay, rq,
intel_overlay_off_tail);
}
@@ -468,23 +468,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- ret = intel_overlay_do_wait_request(overlay, req,
+ ret = intel_overlay_do_wait_request(overlay, rq,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -801,7 +801,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ vma = i915_gem_object_pin_to_display_plane(new_bo,
+ 0, NULL, PIN_MAPPABLE);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e702a6487aa9..41d00b1603e3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -397,8 +397,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
/**
* scale - scale values from one range to another
- *
* @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
*
* Return @source_val in range [@source_min..@source_max] scaled to range
* [@target_min..@target_max].
@@ -416,8 +419,9 @@ static uint32_t scale(uint32_t source_val,
source_val = clamp(source_val, source_min, source_max);
/* avoid overflows */
- target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
- (target_max - target_min), source_max - source_min);
+ target_val = mul_u32_u32(source_val - source_min,
+ target_max - target_min);
+ target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
target_val += target_min;
return target_val;
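
A standalone sketch of the overflow-safe scaling above, assuming smax > smin: the u32 x u32 product is widened to 64 bits (as mul_u32_u32() does) before the round-to-nearest divide, so large source/target ranges no longer overflow a 32-bit intermediate.

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_range(uint32_t val,
			    uint32_t smin, uint32_t smax,
			    uint32_t tmin, uint32_t tmax)
{
	uint64_t num;

	/* clamp into the source range */
	if (val < smin)
		val = smin;
	if (val > smax)
		val = smax;

	/* 64-bit product, then round-to-nearest divide */
	num = (uint64_t)(val - smin) * (tmax - tmin);
	return tmin + (uint32_t)((num + (smax - smin) / 2) / (smax - smin));
}

int main(void)
{
	/* Map a duty cycle in [0, 0xffff] onto [0, 100]. */
	printf("%u\n", scale_range(0x8000, 0, 0xffff, 0, 100));
	return 0;
}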
@@ -497,7 +501,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
u32 val;
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (INTEL_INFO(dev_priv)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eb68abf6a8e9..b8da4dcdd584 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -729,6 +729,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* intel_calculate_wm - calculate watermark level
* @pixel_rate: pixel clock
* @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
* @cpp: bytes per pixel
* @latency_ns: memory latency for the platform
*
@@ -2916,10 +2917,6 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
/* ILK cursor LP0 latency is 1300 ns */
if (IS_GEN5(dev_priv))
wm[0] = 13;
-
- /* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev_priv))
- wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
@@ -4596,7 +4593,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
min_disp_buf_needed = res_blocks;
}
- if (res_blocks >= ddb_allocation || res_lines > 31 ||
+ if ((level > 0 && res_lines > 31) ||
+ res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
*enabled = false;
@@ -4617,8 +4615,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
+ /* The number of lines is ignored for the level 0 watermark. */
+ *out_lines = level ? res_lines : 0;
*out_blocks = res_blocks;
- *out_lines = res_lines;
*enabled = true;
return 0;
@@ -4710,6 +4709,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
if (!dev_priv->ipc_enabled)
goto exit;
+ trans_min = 0;
if (INTEL_GEN(dev_priv) >= 10)
trans_min = 4;
@@ -5864,6 +5864,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
+ * @crtc: the #intel_crtc on which to compute the WM
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -6359,7 +6360,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
+void gen6_rps_boost(struct i915_request *rq,
struct intel_rps_client *rps_client)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
@@ -6372,12 +6373,15 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
if (!rps->enabled)
return;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return;
+
+ /* Serializes with i915_request_retire() */
boost = false;
spin_lock_irqsave(&rq->lock, flags);
- if (!rq->waitboost && !i915_gem_request_completed(rq)) {
- atomic_inc(&rps->num_waiters);
+ if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+ boost = !atomic_fetch_inc(&rps->num_waiters);
rq->waitboost = true;
- boost = true;
}
spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
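
A tiny illustration of the waitboost change above using C11 atomics (the driver uses atomic_fetch_inc(); names here are stand-ins): because fetch-and-increment returns the old waiter count, only the waiter that takes it from 0 to 1 requests the boost.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_waiters = 0;

static bool register_waiter(void)
{
	/* boost only if we were the first waiter */
	return atomic_fetch_add(&num_waiters, 1) == 0;
}

int main(void)
{
	printf("first boosts:  %d\n", register_waiter());  /* 1 */
	printf("second boosts: %d\n", register_waiter());  /* 0 */
	return 0;
}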
@@ -6711,7 +6715,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
+ * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
*/
if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
@@ -6938,7 +6942,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
- } else if (INTEL_INFO(dev_priv)->gen >= 8) {
+ } else if (INTEL_GEN(dev_priv) >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev_priv)) {
@@ -7549,7 +7553,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
- if (INTEL_INFO(dev_priv)->gen != 5)
+ if (!IS_GEN5(dev_priv))
return 0;
spin_lock_irq(&mchdev_lock);
@@ -7633,7 +7637,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen != 5)
+ if (!IS_GEN5(dev_priv))
return;
spin_lock_irq(&mchdev_lock);
@@ -7684,7 +7688,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
- if (INTEL_INFO(dev_priv)->gen != 5)
+ if (!IS_GEN5(dev_priv))
return 0;
spin_lock_irq(&mchdev_lock);
@@ -8022,7 +8026,10 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
intel_disable_gt_powersave(dev_priv);
- gen6_reset_rps_interrupts(dev_priv);
+ if (INTEL_GEN(dev_priv) < 11)
+ gen6_reset_rps_interrupts(dev_priv);
+ else
+ WARN_ON_ONCE(1);
}
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
@@ -8135,6 +8142,8 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
+ } else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) {
+ /* TODO */
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rps(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
@@ -8483,7 +8492,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
if (!HAS_PCH_CNP(dev_priv))
return;
- /* Display WA #1181: cnp */
+ /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
@@ -8513,7 +8522,13 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
val |= SARBUNIT_CLKGATE_DIS;
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+ /* Wa_2201832410:cnl */
+ val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+ val |= GWUNIT_CLKGATE_DIS;
+ I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+
/* WaDisableVFclkgate:cnl */
+ /* WaVFUnitClockGatingDisable:cnl */
val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
val |= VFUNIT_CLKGATE_DIS;
I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
@@ -9415,15 +9430,16 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
u32 lower, upper, tmp;
- unsigned long flags;
int loop = 2;
- /* The register accessed do not need forcewake. We borrow
+ /*
+ * The registers accessed do not need forcewake. We borrow
* uncore lock to prevent concurrent access to range reg.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ lockdep_assert_held(&dev_priv->uncore.lock);
- /* vlv and chv residency counters are 40 bits in width.
+ /*
+ * vlv and chv residency counters are 40 bits in width.
* With a control bit, we can choose between upper or lower
* 32bit window into this counter.
*
@@ -9447,29 +9463,49 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
upper = I915_READ_FW(reg);
} while (upper != tmp && --loop);
- /* Everywhere else we always use VLV_COUNTER_CONTROL with the
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
* VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
* now.
*/
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
return lower | (u64)upper << 8;
}
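
An illustrative model of the 40-bit split-counter read above (fake_read_window() stands in for the register accessors): sample the upper window, then the lower, then the upper again, retrying once if the upper half moved in between, and combine as lower | upper << 8.

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter;     /* pretend 40-bit hardware counter */

/* lower window = bits 31:0, upper window = bits 39:8 */
static uint32_t fake_read_window(int upper)
{
	return upper ? (uint32_t)(hw_counter >> 8) : (uint32_t)hw_counter;
}

static uint64_t read_residency_raw(void)
{
	uint32_t lower, upper, tmp;
	int loop = 2;

	upper = fake_read_window(1);
	do {
		tmp = upper;
		lower = fake_read_window(0);
		upper = fake_read_window(1);
	} while (upper != tmp && --loop);

	return (uint64_t)lower | ((uint64_t)upper << 8);
}

int main(void)
{
	hw_counter = 0x123456789aull & ((1ull << 40) - 1);
	printf("raw=%#llx\n", (unsigned long long)read_residency_raw());
	return 0;
}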
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
- u64 time_hw;
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
u32 mul, div;
if (!HAS_RC6(dev_priv))
return 0;
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other so we can use the relative address, compared to the smallest
+ * one as the index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
mul = 1000000;
div = dev_priv->czclk_freq;
+ overflow_hw = BIT_ULL(40);
time_hw = vlv_residency_raw(dev_priv, reg);
} else {
/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
@@ -9481,10 +9517,33 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
div = 1;
}
- time_hw = I915_READ(reg);
+ overflow_hw = BIT_ULL(32);
+ time_hw = I915_READ_FW(reg);
}
- return DIV_ROUND_UP_ULL(time_hw * mul, div);
+ /*
+ * Counter wrap handling.
+ *
+ * We rely on a sufficient frequency of queries, otherwise the counters
+ * can still wrap between samples.
+ */
+ prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
+ dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
+ dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ return mul_u64_u32_div(time_hw, mul, div);
}
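
A minimal model of the wrap handling added above (field names are illustrative): keep the previous raw sample and a 64-bit running total per counter, fold each new sample's delta, modulo the counter width, into the total, and only convert to time units afterwards.

#include <stdint.h>
#include <stdio.h>

struct rc6_counter {
	uint64_t prev_hw;       /* last raw hardware sample */
	uint64_t cur;           /* extended 64-bit running total */
};

static uint64_t rc6_update(struct rc6_counter *c, uint64_t sample,
			   uint64_t overflow /* e.g. 1ull << 32 or 1ull << 40 */)
{
	uint64_t delta;

	if (sample >= c->prev_hw)
		delta = sample - c->prev_hw;
	else
		delta = sample + overflow - c->prev_hw;   /* counter wrapped */

	c->prev_hw = sample;
	c->cur += delta;
	return c->cur;
}

int main(void)
{
	struct rc6_counter c = { .prev_hw = 0xfffffff0ull, .cur = 0xfffffff0ull };

	/* A 32-bit counter wrapping from 0xfffffff0 to 0x10 adds 0x20. */
	printf("total=%#llx\n",
	       (unsigned long long)rc6_update(&c, 0x10, 1ull << 32));
	return 0;
}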
u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index e9feffdea899..23175c5c4a50 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,6 +56,111 @@
#include "intel_drv.h"
#include "i915_drv.h"
+static inline enum intel_display_power_domain
+psr_aux_domain(struct intel_dp *intel_dp)
+{
+ /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
+ * However, for non-A AUX ports the corresponding non-EDP transcoders
+ * would have already enabled power well 2 and DC_OFF. This means we can
+ * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+ * specific AUX_IO reference without powering up any extra wells.
+ * Note that PSR is enabled only on Port A even though this function
+ * returns the correct domain for other ports too.
+ */
+ return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_dp->aux_power_domain;
+}
+
+static void psr_aux_io_power_get(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+ if (INTEL_GEN(dev_priv) < 10)
+ return;
+
+ intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
+}
+
+static void psr_aux_io_power_put(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+ if (INTEL_GEN(dev_priv) < 10)
+ return;
+
+ intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
+}
+
+static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+{
+ uint8_t psr_caps = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
+ return false;
+ return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ uint8_t dprx = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
+ return false;
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+ uint8_t alpm_caps = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &alpm_caps) != 1)
+ return false;
+ return alpm_caps & DP_ALPM_CAP;
+}
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ dev_priv->psr.sink_support = true;
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+ uint8_t frame_sync_cap;
+
+ dev_priv->psr.sink_support = true;
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap) != 1)
+ frame_sync_cap = 0;
+ dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
+ /* PSR2 needs frame sync as well */
+ dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+ DRM_DEBUG_KMS("PSR2 %s on sink",
+ dev_priv->psr.psr2_support ? "supported" : "not supported");
+
+ if (dev_priv->psr.psr2_support) {
+ dev_priv->psr.y_cord_support =
+ intel_dp_get_y_cord_status(intel_dp);
+ dev_priv->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.alpm =
+ intel_dp_get_alpm_status(intel_dp);
+ }
+ }
+}
+
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -126,7 +231,7 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return DP_AUX_CH_CTL(port);
else
return EDP_PSR_AUX_CTL;
@@ -135,7 +240,7 @@ static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return DP_AUX_CH_DATA(port, index);
else
return EDP_PSR_AUX_DATA(index);
@@ -341,6 +446,50 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
hsw_activate_psr1(intel_dp);
}
+static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
+ int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
+ int psr_max_h = 0, psr_max_v = 0;
+
+ /*
+ * FIXME psr2_support is messed up. It's both computed
+ * dynamically during PSR enable, and extracted from sink
+ * caps during eDP detection.
+ */
+ if (!dev_priv->psr.psr2_support)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ psr_max_h = 4096;
+ psr_max_v = 2304;
+ } else if (IS_GEN9(dev_priv)) {
+ psr_max_h = 3640;
+ psr_max_v = 2304;
+ }
+
+ if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
+ DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
+ return false;
+ }
+
+ /*
+ * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
+ * After GTC implementation, remove this restriction.
+ */
+ if (!dev_priv->psr.y_cord_support) {
+ DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
+ return false;
+ }
+
+ return true;
+}
+
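
A hedged sketch of the per-generation PSR2 resolution gate in intel_psr2_config_valid() above; the limits mirror the hunk, everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool psr2_resolution_ok(int gen, bool is_glk, int hdisplay, int vdisplay)
{
	int max_h = 0, max_v = 0;

	if (gen >= 10 || is_glk) {
		max_h = 4096;
		max_v = 2304;
	} else if (gen == 9) {
		max_h = 3640;
		max_v = 2304;
	}

	return hdisplay <= max_h && vdisplay <= max_v;
}

int main(void)
{
	printf("gen9  3840x2160: %d\n", psr2_resolution_ok(9, false, 3840, 2160));
	printf("gen10 3840x2160: %d\n", psr2_resolution_ok(10, false, 3840, 2160));
	return 0;
}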
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -403,34 +552,14 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- /*
- * FIXME psr2_support is messed up. It's both computed
- * dynamically during PSR enable, and extracted from sink
- * caps during eDP detection.
- */
- if (!dev_priv->psr.psr2_support) {
- crtc_state->has_psr = true;
- return;
- }
-
- /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (adjusted_mode->crtc_hdisplay > 3200 ||
- adjusted_mode->crtc_vdisplay > 2000) {
- DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
- return;
- }
-
- /*
- * FIXME:enable psr2 only for y-cordinate psr2 panels
- * After gtc implementation , remove this restriction.
- */
- if (!dev_priv->psr.y_cord_support) {
- DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
return;
}
crtc_state->has_psr = true;
- crtc_state->has_psr2 = true;
+ crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+ DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -459,6 +588,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 chicken;
+ psr_aux_io_power_get(intel_dp);
+
if (dev_priv->psr.psr2_support) {
chicken = PSR2_VSC_ENABLE_PROG_HEADER;
if (dev_priv->psr.y_cord_support)
@@ -617,6 +748,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
+
+ psr_aux_io_power_put(intel_dp);
}
/**
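The psr_aux_io_power_get()/psr_aux_io_power_put() calls wrapped around the PSR source enable/disable above are defined elsewhere in this patch. A minimal sketch of what such helpers could look like, assuming they simply take and release the new AUX_IO_A power domain added to intel_runtime_pm.c later in this diff (platform gating omitted):

/* Hypothetical sketch only; the real helpers live outside this hunk. */
static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);

	/* Keep the AUX IO well powered for as long as the PSR source is enabled. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_IO_A);
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);

	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_IO_A);
}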
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e2085820b586..1d599524a759 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -66,7 +66,7 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
}
static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
@@ -75,19 +75,19 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
@@ -122,22 +122,22 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+ if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
cmd |= MI_INVALIDATE_ISP;
}
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-/**
+/*
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
@@ -175,13 +175,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
+intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs;
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -191,9 +191,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
*cs++ = 0; /* low dword */
*cs++ = 0; /* high dword */
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -203,21 +203,21 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(req);
+ ret = intel_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
@@ -247,7 +247,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -255,17 +255,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = flags;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
+gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
u32 *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -273,16 +273,16 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -324,10 +324,10 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
- gen7_render_ring_cs_stall_wa(req);
+ gen7_render_ring_cs_stall_wa(rq);
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -335,7 +335,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = flags;
*cs++ = scratch_addr;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -453,13 +453,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- DRM_DEBUG_KMS("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ DRM_DEBUG_DRIVER("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_HEAD(engine),
+ I915_READ_TAIL(engine),
+ I915_READ_START(engine));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
@@ -492,8 +492,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(engine))
- DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
- engine->name, I915_READ_HEAD(engine));
+ DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
+ engine->name, I915_READ_HEAD(engine));
intel_ring_update_space(ring);
I915_WRITE_HEAD(engine, ring->head);
@@ -531,7 +531,7 @@ out:
}
static void reset_ring_common(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/*
* RC6 must be prevented until the reset is complete and the engine
@@ -595,15 +595,15 @@ static void reset_ring_common(struct intel_engine_cs *engine,
}
}
-static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
+static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(rq);
if (ret != 0)
return ret;
- ret = i915_gem_render_state_emit(req);
+ ret = i915_gem_render_state_emit(rq);
if (ret)
return ret;
@@ -655,15 +655,15 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (INTEL_INFO(dev_priv)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
return init_workarounds_ring(engine);
}
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
+static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
{
- struct drm_i915_private *dev_priv = req->i915;
+ struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int num_rings = 0;
@@ -674,11 +674,11 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
continue;
- mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
+ mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(mbox_reg);
- *cs++ = req->global_seqno;
+ *cs++ = rq->global_seqno;
num_rings++;
}
}
@@ -690,7 +690,7 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
static void cancel_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->timeline->lock, flags);
@@ -698,7 +698,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline->requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_gem_request_completed(request))
+ if (!i915_request_completed(request))
dma_fence_set_error(&request->fence, -EIO);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -706,56 +706,46 @@ static void cancel_requests(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static void i9xx_submit_request(struct drm_i915_gem_request *request)
+static void i9xx_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- i915_gem_request_submit(request);
+ i915_request_submit(request);
I915_WRITE_TAIL(request->engine,
intel_ring_set_tail(request->ring, request->tail));
}
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
- *cs++ = req->global_seqno;
+ *cs++ = rq->global_seqno;
*cs++ = MI_USER_INTERRUPT;
- req->tail = intel_ring_offset(req, cs);
- assert_ring_tail_valid(req->ring, req->tail);
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
-/**
- * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
- *
- * @request - request to write to the ring
- *
- * Update the mailbox registers in the *other* rings with the current seqno.
- * This acts like a signal in the canonical semaphore.
- */
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- return i9xx_emit_breadcrumb(req,
- req->engine->semaphore.signal(req, cs));
+ return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
}
static int
-gen6_ring_sync_to(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal)
+gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
{
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
- u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
+ u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
u32 *cs;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -767,7 +757,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
*cs++ = signal->global_seqno - 1;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -866,17 +856,17 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
}
static int
-bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -919,20 +909,20 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
}
static int
-i965_emit_bb_start(struct drm_i915_gem_request *req,
+i965_emit_bb_start(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -942,13 +932,13 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
-i830_emit_bb_start(struct drm_i915_gem_request *req,
+i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
+ u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -959,13 +949,13 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
*cs++ = cs_offset;
*cs++ = 0xdeadbeef;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- cs = intel_ring_begin(req, 6 + 2);
+ cs = intel_ring_begin(rq, 6 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -982,39 +972,39 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
/* ... and execute it. */
offset = cs_offset;
}
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-i915_emit_bb_start(struct drm_i915_gem_request *req,
+i915_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1385,7 +1375,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
intel_ring_reset(engine->buffer, 0);
}
-static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -1471,7 +1461,7 @@ static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
return 0;
}
-static int remap_l3(struct drm_i915_gem_request *rq, int slice)
+static int remap_l3(struct i915_request *rq, int slice)
{
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
@@ -1499,7 +1489,7 @@ static int remap_l3(struct drm_i915_gem_request *rq, int slice)
return 0;
}
-static int switch_context(struct drm_i915_gem_request *rq)
+static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *to_ctx = rq->ctx;
@@ -1569,7 +1559,7 @@ err:
return ret;
}
-static int ring_request_alloc(struct drm_i915_gem_request *request)
+static int ring_request_alloc(struct i915_request *request)
{
int ret;
@@ -1595,7 +1585,7 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
- struct drm_i915_gem_request *target;
+ struct i915_request *target;
long timeout;
lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
@@ -1613,13 +1603,13 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- timeout = i915_wait_request(target,
+ timeout = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
- i915_gem_request_retire_upto(target);
+ i915_request_retire_upto(target);
intel_ring_update_space(ring);
GEM_BUG_ON(ring->space < bytes);
@@ -1642,10 +1632,9 @@ int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
return 0;
}
-u32 *intel_ring_begin(struct drm_i915_gem_request *req,
- unsigned int num_dwords)
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
- struct intel_ring *ring = req->ring;
+ struct intel_ring *ring = rq->ring;
const unsigned int remain_usable = ring->effective_size - ring->emit;
const unsigned int bytes = num_dwords * sizeof(u32);
unsigned int need_wrap = 0;
@@ -1655,7 +1644,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
/* Packets must be qword aligned. */
GEM_BUG_ON(num_dwords & 1);
- total_bytes = bytes + req->reserved_space;
+ total_bytes = bytes + rq->reserved_space;
GEM_BUG_ON(total_bytes > ring->effective_size);
if (unlikely(total_bytes > remain_usable)) {
@@ -1676,7 +1665,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
* wrap and only need to effectively wait for the
* reserved size from the start of ringbuffer.
*/
- total_bytes = req->reserved_space + remain_actual;
+ total_bytes = rq->reserved_space + remain_actual;
}
}
@@ -1690,9 +1679,9 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
* overallocation and the assumption is that then we never need
* to wait (which has the risk of failing with EINTR).
*
- * See also i915_gem_request_alloc() and i915_add_request().
+ * See also i915_request_alloc() and i915_request_add().
*/
- GEM_BUG_ON(!req->reserved_space);
+ GEM_BUG_ON(!rq->reserved_space);
ret = wait_for_space(ring, total_bytes);
if (unlikely(ret))
@@ -1721,29 +1710,28 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
+int intel_ring_cacheline_align(struct i915_request *rq)
{
- int num_dwords =
- (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
u32 *cs;
if (num_dwords == 0)
return 0;
- num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- cs = intel_ring_begin(req, num_dwords);
+ num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
+ cs = intel_ring_begin(rq, num_dwords);
if (IS_ERR(cs))
return PTR_ERR(cs);
while (num_dwords--)
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
+static void gen6_bsd_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
@@ -1780,11 +1768,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1810,18 +1798,18 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-hsw_emit_bb_start(struct drm_i915_gem_request *req,
+hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1831,19 +1819,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
MI_BATCH_RESOURCE_STREAMER : 0);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen6_emit_bb_start(struct drm_i915_gem_request *req,
+gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1851,18 +1839,18 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1887,7 +1875,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
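The churn above is a mechanical rename from struct drm_i915_gem_request (req) to struct i915_request (rq); the command-emission pattern itself is unchanged. A self-contained sketch of that pattern under the new names, for reference (the dword count must stay even because intel_ring_begin() asserts qword alignment):

static int example_emit_noops(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);	/* reserve two dwords in rq->ring */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);	/* check we emitted exactly what we reserved */
	return 0;
}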
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a0e7a6c2a57c..0320c2c4cfba 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,10 +3,12 @@
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
+
#include "i915_gem_batch_pool.h"
-#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+
#include "i915_pmu.h"
+#include "i915_request.h"
#include "i915_selftest.h"
struct drm_printer;
@@ -90,7 +92,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define instdone_subslice_mask(dev_priv__) \
(INTEL_GEN(dev_priv__) == 7 ? \
- 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
@@ -115,7 +117,7 @@ struct intel_engine_hangcheck {
unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
- struct drm_i915_gem_request *active_request;
+ struct i915_request *active_request;
bool stalled;
};
@@ -156,7 +158,10 @@ struct i915_ctx_workarounds {
struct i915_vma *vma;
};
-struct drm_i915_gem_request;
+struct i915_request;
+
+#define I915_MAX_VCS 4
+#define I915_MAX_VECS 2
/*
* Engine IDs definitions.
@@ -167,8 +172,12 @@ enum intel_engine_id {
BCS,
VCS,
VCS2,
+ VCS3,
+ VCS4,
#define _VCS(n) (VCS + (n))
- VECS
+ VECS,
+ VECS2
+#define _VECS(n) (VECS + (n))
};
struct i915_priolist {
@@ -200,9 +209,17 @@ struct intel_engine_execlists {
bool no_priolist;
/**
- * @elsp: the ExecList Submission Port register
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
*/
- u32 __iomem *elsp;
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
/**
* @port: execlist port states
@@ -218,7 +235,7 @@ struct intel_engine_execlists {
/**
* @request_count: combined request and submission count
*/
- struct drm_i915_gem_request *request_count;
+ struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
@@ -256,6 +273,16 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @queue_priority: Highest pending priority.
+ *
+ * When we add requests into the queue, or adjust the priority of
+ * executing requests, we compute the maximum priority of those
+ * pending requests. We can then use this value to determine if
+ * we need to preempt the executing requests to service the queue.
+ */
+ int queue_priority;
+
+ /**
* @queue: queue of requests, in priority lists
*/
struct rb_root queue;
@@ -279,6 +306,11 @@ struct intel_engine_execlists {
* @csb_use_mmio: access csb through mmio, instead of hwsp
*/
bool csb_use_mmio;
+
+ /**
+ * @preempt_complete_status: expected CSB upon completing preemption
+ */
+ u32 preempt_complete_status;
};
#define INTEL_ENGINE_CS_MAX_NAME 8
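The @queue_priority field documented above is the hook the scheduler uses to decide whether preemption is worthwhile. A hedged sketch of that decision, assuming the comparison is made against the priority of the requests currently occupying the ports (the real check lives in the execlists submission code, not shown here):

static bool example_need_preempt(const struct intel_engine_execlists *execlists,
				 int active_priority)
{
	/*
	 * Something queued behind the executing requests has a higher
	 * priority, so it is worth asking the HW to preempt and let the
	 * scheduler inject the more important work.
	 */
	return execlists->queue_priority > active_priority;
}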
@@ -332,9 +364,9 @@ struct intel_engine_cs {
spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
struct rb_root waiters; /* sorted by retirement, priority */
- struct rb_root signals; /* sorted by retirement */
+ struct list_head signals; /* sorted by retirement */
struct task_struct *signaler; /* used for fence signalling */
- struct drm_i915_gem_request __rcu *first_signal;
+
struct timer_list fake_irq; /* used after a missed interrupt */
struct timer_list hangcheck; /* detect missed interrupts */
@@ -386,7 +418,7 @@ struct intel_engine_cs {
int (*init_hw)(struct intel_engine_cs *engine);
void (*reset_hw)(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *req);
+ struct i915_request *rq);
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
@@ -397,22 +429,20 @@ struct intel_engine_cs {
struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
- int (*request_alloc)(struct drm_i915_gem_request *req);
- int (*init_context)(struct drm_i915_gem_request *req);
+ int (*request_alloc)(struct i915_request *rq);
+ int (*init_context)(struct i915_request *rq);
- int (*emit_flush)(struct drm_i915_gem_request *request,
- u32 mode);
+ int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct drm_i915_gem_request *req,
+ int (*emit_bb_start)(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
- u32 *cs);
+ void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
@@ -421,7 +451,7 @@ struct intel_engine_cs {
* This is called from an atomic context with irqs disabled; must
* be irq safe.
*/
- void (*submit_request)(struct drm_i915_gem_request *req);
+ void (*submit_request)(struct i915_request *rq);
/* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
@@ -429,8 +459,7 @@ struct intel_engine_cs {
*
* Called under the struct_mutex.
*/
- void (*schedule)(struct drm_i915_gem_request *request,
- int priority);
+ void (*schedule)(struct i915_request *request, int priority);
/*
* Cancel all requests on the hardware, or queued for execution.
@@ -498,9 +527,9 @@ struct intel_engine_cs {
} mbox;
/* AKA wait() */
- int (*sync_to)(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal);
- u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
+ int (*sync_to)(struct i915_request *rq,
+ struct i915_request *signal);
+ u32 *(*signal)(struct i915_request *rq, u32 *cs);
} semaphore;
struct intel_engine_execlists execlists;
@@ -654,7 +683,7 @@ intel_engine_flag(const struct intel_engine_cs *engine)
}
static inline u32
-intel_read_status_page(struct intel_engine_cs *engine, int reg)
+intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
return READ_ONCE(engine->status_page.page_addr[reg]);
@@ -721,14 +750,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+int __must_check intel_ring_cacheline_align(struct i915_request *rq);
int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
-u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
- unsigned int n);
+u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-static inline void
-intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
/* Dummy function.
*
@@ -738,22 +765,20 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
-static inline u32
-intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
return pos & (ring->size - 1);
}
-static inline u32
-intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - req->ring->vaddr;
- GEM_BUG_ON(offset > req->ring->size);
- return intel_ring_wrap(req->ring, offset);
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
}
static inline void
@@ -791,7 +816,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
/* Whilst writes to the tail are strictly order, there is no
* serialisation between readers and the writers. The tail may be
- * read by i915_gem_request_retire() just as it is being updated
+ * read by i915_request_retire() just as it is being updated
* by execlists, as although the breadcrumb is complete, the context
* switch hasn't been seen.
*/
@@ -812,8 +837,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
@@ -833,7 +858,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
}
int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
+int intel_ring_workarounds_emit(struct i915_request *rq);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -861,7 +886,7 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait,
- struct drm_i915_gem_request *rq)
+ struct i915_request *rq)
{
wait->tsk = current;
wait->request = rq;
@@ -887,9 +912,9 @@ intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
static inline bool
intel_wait_update_request(struct intel_wait *wait,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
- return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
+ return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}
static inline bool
@@ -900,9 +925,9 @@ intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
static inline bool
intel_wait_check_request(const struct intel_wait *wait,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
- return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
+ return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
@@ -914,9 +939,8 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
- bool wakeup);
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
+void intel_engine_cancel_signaling(struct i915_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
@@ -935,7 +959,6 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 70e659772a7a..53ea564f971e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -130,6 +130,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_D";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
+ case POWER_DOMAIN_AUX_IO_A:
+ return "AUX_IO_A";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -1853,6 +1855,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -2646,6 +2649,48 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
DRM_ERROR("DBuf power disable timeout!\n");
}
+/*
+ * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
+ * needed and keep it disabled as much as possible.
+ */
+static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power disable timeout!\n");
+}
+
+static void icl_mbus_init(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
+
+ I915_WRITE(MBUS_ABOX_CTL, val);
+}
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -2794,12 +2839,19 @@ static const struct cnl_procmon {
{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, that's why we call the ICL macros even though the function has CNL
+ * on its name.
+ */
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
{
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(CNL_PORT_COMP_DW3);
+ val = I915_READ(ICL_PORT_COMP_DW3(port));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -2820,13 +2872,13 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
break;
}
- val = I915_READ(CNL_PORT_COMP_DW1);
+ val = I915_READ(ICL_PORT_COMP_DW1(port));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(CNL_PORT_COMP_DW1, val);
+ I915_WRITE(ICL_PORT_COMP_DW1(port), val);
- I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
- I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
+ I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
}
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
@@ -2847,7 +2899,8 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
- cnl_set_procmon_ref_values(dev_priv);
+ /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+ cnl_set_procmon_ref_values(dev_priv, PORT_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -2911,6 +2964,80 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
}
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ enum port port;
+ u32 val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Enable PCH reset handshake. */
+ val = I915_READ(HSW_NDE_RSTWRN_OPT);
+ val |= RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+ for (port = PORT_A; port <= PORT_B; port++) {
+ /* 2. Enable DDI combo PHY comp. */
+ val = I915_READ(ICL_PHY_MISC(port));
+ val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ cnl_set_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val |= COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+ /* 3. Set power down enable. */
+ val = I915_READ(ICL_PORT_CL_DW5(port));
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ }
+
+ /* 4. Enable power well 1 (PG1) and aux IO power. */
+ /* FIXME: ICL power wells code not here yet. */
+
+ /* 5. Enable CDCLK. */
+ icl_init_cdclk(dev_priv);
+
+ /* 6. Enable DBUF. */
+ icl_dbuf_enable(dev_priv);
+
+ /* 7. Setup MBUS. */
+ icl_mbus_init(dev_priv);
+
+ /* 8. CHICKEN_DCPR_1 */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ CNL_DDI_CLOCK_REG_ACCESS_ON);
+}
+
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+ u32 val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ icl_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ icl_uninit_cdclk(dev_priv);
+
+ /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
+ /* FIXME: ICL power wells code not here yet. */
+
+ /* 5. Disable Comp */
+ for (port = PORT_A; port <= PORT_B; port++) {
+ val = I915_READ(ICL_PHY_MISC(port));
+ val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+ }
+}
+
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
@@ -3043,7 +3170,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
power_domains->initializing = true;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ icl_display_core_init(dev_priv, resume);
+ } else if (IS_CANNONLAKE(dev_priv)) {
cnl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_BC(dev_priv)) {
skl_display_core_init(dev_priv, resume);
@@ -3084,7 +3213,9 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_display_core_uninit(dev_priv);
+ else if (IS_CANNONLAKE(dev_priv))
cnl_display_core_uninit(dev_priv);
else if (IS_GEN9_BC(dev_priv))
skl_display_core_uninit(dev_priv);
@@ -3200,18 +3331,19 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
* @dev_priv: i915 device instance
*
* This function grabs a device-level runtime pm reference if the device is
- * already in use and ensures that it is powered up.
+ * already in use and ensures that it is powered up. It is illegal to access
+ * the HW if intel_runtime_pm_get_if_in_use() reports failure.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: True if the wakeref was acquired, or False otherwise.
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct device *kdev = &pdev->dev;
-
if (IS_ENABLED(CONFIG_PM)) {
- int ret = pm_runtime_get_if_in_use(kdev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
/*
* In cases runtime PM is disabled by the RPM core and we get
@@ -3219,9 +3351,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- WARN_ONCE(ret < 0,
- "pm_runtime_get_if_in_use() failed: %d\n", ret);
- if (ret <= 0)
+ if (pm_runtime_get_if_in_use(kdev) <= 0)
return false;
}
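The reworded kernel-doc above boils down to a strict calling convention: never touch the HW when the conditional grab fails, and pair every successful grab with a put. A hypothetical caller illustrating that convention (the register-sampling helper is made up for this example):

static u32 example_sample_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	u32 val;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return 0;	/* device suspended: HW access is illegal here */

	val = I915_READ(reg);

	intel_runtime_pm_put(dev_priv);	/* symmetric release of the wakeref */
	return val;
}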
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0bf97ed5ffac..96e213ec202d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -214,7 +214,7 @@ static bool
intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector);
-/**
+/*
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
@@ -250,10 +250,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* writing them only once doesn't appear to 'stick'.
* The BIOS does this too. Yay, magic
*/
- for (i = 0; i < 2; i++)
- {
+ for (i = 0; i < 2; i++) {
I915_WRITE(GEN3_SDVOB, bval);
POSTING_READ(GEN3_SDVOB);
+
I915_WRITE(GEN3_SDVOC, cval);
POSTING_READ(GEN3_SDVOC);
}
@@ -643,7 +643,7 @@ static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
&targets, sizeof(targets));
}
-/**
+/*
* Return whether each input is trained.
*
* This function is making an assumption about the layout of the response,
@@ -1061,8 +1061,10 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
return true;
}
-/* Asks the sdvo controller for the preferred input mode given the output mode.
- * Unfortunately we have to set up the full output mode to do that. */
+/*
+ * Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that.
+ */
static bool
intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *mode,
@@ -1095,8 +1097,10 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
+ /*
+ * SDVO TV has fixed PLL values depend on its clock range,
+ * this mirrors vbios setting.
+ */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
@@ -1132,7 +1136,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
- /* We need to construct preferred input timings based on our
+ /*
+ * We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
* timings, even though this isn't really the right place in
* the sequence to do it. Oh well.
@@ -1155,7 +1160,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
}
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ /*
+ * Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
*/
pipe_config->pixel_multiplier =
@@ -1169,9 +1175,12 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = true;
if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /* See CEA-861-E - 5.1 Default Encoding Parameters */
- /* FIXME: This bit is only valid when using TMDS encoding and 8
- * bit per color mode. */
+ /*
+ * See CEA-861-E - 5.1 Default Encoding Parameters
+ *
+ * FIXME: This bit is only valid when using TMDS encoding and 8
+ * bit per color mode.
+ */
if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
pipe_config->limited_color_range = true;
@@ -1272,7 +1281,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_update_props(intel_sdvo, sdvo_state);
- /* First, set the input mapping for the first input to our controlled
+ /*
+ * First, set the input mapping for the first input to our controlled
* output. This is only correct if we're a single-input device, in
* which case the first input is the output from the appropriate SDVO
* channel on the motherboard. In a two-input device, the first input
@@ -1435,8 +1445,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
- /* Some sdvo encoders are not spec compliant and don't
- * implement the mandatory get_timings function. */
+ /*
+ * Some sdvo encoders are not spec compliant and don't
+ * implement the mandatory get_timings function.
+ */
DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
@@ -1585,7 +1597,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
- /* Warn if the device reported failure to sync.
+ /*
+ * Warn if the device reported failure to sync.
+ *
* A lot of SDVO devices fail to notify of sync, but it's
* a given it the status is a success, we succeeded.
*/
@@ -1672,8 +1686,10 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
- /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
- * on the line. */
+ /*
+ * HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+ * on the line.
+ */
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
@@ -1689,7 +1705,15 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
+ &intel_sdvo->hotplug_active, 2);
+}
+
+static bool intel_sdvo_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ intel_sdvo_enable_hotplug(encoder);
+
+ return intel_encoder_hotplug(encoder, connector);
}
static bool
@@ -1957,7 +1981,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- /* Read the list of supported input resolutions for the selected TV
+ /*
+ * Read the list of supported input resolutions for the selected TV
* format.
*/
format_map = 1 << conn_state->tv.mode;
@@ -2268,7 +2293,8 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
uint16_t mask = 0;
unsigned int num_bits;
- /* Make a mask of outputs less than or equal to our own priority in the
+ /*
+ * Make a mask of outputs less than or equal to our own priority in the
* list.
*/
switch (sdvo->controlled_output) {
@@ -2298,7 +2324,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
sdvo->ddc_bus = 1 << num_bits;
}
-/**
+/*
* Choose the appropriate DDC bus for control bus switch command for this
* SDVO output based on the controlled output.
*
@@ -2342,9 +2368,11 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ /*
+ * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
* our code totally fails once we start using gmbus. Hence fall back to
- * bit banging for now. */
+ * bit banging for now.
+ */
intel_gmbus_force_bit(sdvo->i2c, true);
}
@@ -2379,7 +2407,8 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
if (my_mapping->slave_addr)
return my_mapping->slave_addr;
- /* If the BIOS only described a different SDVO device, use the
+ /*
+ * If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
if (other_mapping->slave_addr) {
@@ -2389,7 +2418,8 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
return 0x70;
}
- /* No SDVO device info is found for another DVO port,
+ /*
+ * No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
if (sdvo->port == PORT_B)
@@ -2490,10 +2520,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
if (intel_sdvo_get_hotplug_support(intel_sdvo) &
intel_sdvo_connector->output_flag) {
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
- /* Some SDVO devices have one-shot hotplug interrupts.
+ /*
+ * Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
- intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_encoder->hotplug = intel_sdvo_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
@@ -2789,7 +2820,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
to_intel_sdvo_connector_state(conn_state);
uint16_t response, data_value[2];
- /* when horizontal overscan is supported, Add the left/right property */
+ /* when horizontal overscan is supported, add the left/right property */
if (enhancements.overscan_h) {
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_MAX_OVERSCAN_H,
@@ -3074,7 +3105,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
goto err_output;
}
- /* Only enable the hotplug irq if we need it, to work around noisy
+ /*
+ * Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
if (intel_sdvo->hotplug_active) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 3714836879b2..dbdcf85032df 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1139,8 +1139,8 @@ intel_check_sprite_plane(struct intel_plane *plane,
return 0;
}
-int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
@@ -1153,6 +1153,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
/* ignore the pointless "none" flag */
set->flags &= ~I915_SET_COLORKEY_NONE;
+ if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b3dabc219e6a..885fc3809f7f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -43,7 +43,6 @@ enum tv_margin {
TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
};
-/** Private structure for the integrated TV support */
struct intel_tv {
struct intel_encoder base;
@@ -370,12 +369,11 @@ struct tv_mode {
* The constants below were all computed using a 107.520MHz clock
*/
-/**
+/*
* Register programming values for TV modes.
*
* These values account for -1s required.
*/
-
static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
@@ -1126,14 +1124,6 @@ static const struct drm_display_mode reported_modes[] = {
},
};
-/**
- * Detects TV presence by checking for load.
- *
- * Requires that the current pipe's DPLL is active.
-
- * \return true if TV is connected.
- * \return false if TV is disconnected.
- */
static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
@@ -1259,12 +1249,6 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
connector->state->tv.mode = i;
}
-/**
- * Detect the TV connection.
- *
- * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure
- * we have a pipe programmed in order to probe the TV.
- */
static int
intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -1339,13 +1323,6 @@ intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode,
}
}
-/**
- * Stub get_modes function.
- *
- * This should probably return a set of fixed modes, unless we can figure out
- * how to probe modes off of TV connections.
- */
-
static int
intel_tv_get_modes(struct drm_connector *connector)
{
@@ -1512,7 +1489,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
connector = &intel_connector->base;
state = connector->state;
- /* The documentation, for the older chipsets at least, recommend
+ /*
+ * The documentation, for the older chipsets at least, recommend
* using a polling method rather than hotplug detection for TVs.
* This is because in order to perform the hotplug detection, the PLLs
* for the TV must be kept alive increasing power drain and starving
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 9f1bac6398fb..e5bf0d37bf43 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -361,7 +361,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_out;
if (USES_HUC(dev_priv)) {
- ret = intel_huc_init_hw(huc);
+ ret = intel_huc_fw_upload(huc);
if (ret)
goto err_out;
}
@@ -445,3 +445,48 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
if (USES_GUC_SUBMISSION(dev_priv))
gen9_disable_guc_interrupts(dev_priv);
}
+
+int intel_uc_suspend(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ int err;
+
+ if (!USES_GUC(i915))
+ return 0;
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ err = intel_guc_suspend(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+ return err;
+ }
+
+ gen9_disable_guc_interrupts(i915);
+
+ return 0;
+}
+
+int intel_uc_resume(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ int err;
+
+ if (!USES_GUC(i915))
+ return 0;
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ if (i915_modparams.guc_log_level)
+ gen9_enable_guc_interrupts(i915);
+
+ err = intel_guc_resume(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ return err;
+ }
+
+ return 0;
+}
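The new intel_uc_suspend()/intel_uc_resume() entry points are intended to be called from the driver's suspend/resume paths elsewhere in this series; both are no-ops unless GuC is in use and its firmware loaded successfully. A hypothetical pair of call sites, only to show the expected wiring:

static int example_suspend_uc(struct drm_i915_private *i915)
{
	/* Quiesce the GuC before the rest of the GPU is powered down. */
	return intel_uc_suspend(i915);
}

static int example_resume_uc(struct drm_i915_private *i915)
{
	/* Bring the GuC back before any new submissions are allowed. */
	return intel_uc_resume(i915);
}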
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index f2984e01e257..f76d51d1ce70 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -39,6 +39,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
void intel_uc_fini(struct drm_i915_private *dev_priv);
+int intel_uc_suspend(struct drm_i915_private *dev_priv);
+int intel_uc_resume(struct drm_i915_private *dev_priv);
static inline bool intel_uc_is_using_guc(void)
{
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 784eff9cdfc8..3ec0ce505b76 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -197,11 +197,12 @@ fail:
/**
* intel_uc_fw_upload - load uC firmware using custom loader
- *
* @uc_fw: uC firmware
- * @loader: custom uC firmware loader function
+ * @xfer: custom uC firmware loader function
*
* Loads uC firmware using custom loader and updates internal flags.
+ *
+ * Return: 0 on success, non-zero on failure.
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 164dbb8cfa36..4df7c2ef8576 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -37,6 +37,12 @@ static const char * const forcewake_domain_names[] = {
"render",
"blitter",
"media",
+ "vdbox0",
+ "vdbox1",
+ "vdbox2",
+ "vdbox3",
+ "vebox0",
+ "vebox1",
};
const char *
@@ -774,6 +780,9 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv,
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
+#define GEN11_NEEDS_FORCE_WAKE(reg) \
+ ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
+
#define __gen6_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
@@ -826,6 +835,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
if (!entry)
return 0;
+ /*
+ * The list of FW domains depends on the SKU in gen11+ so we
+ * can't determine it statically. We use FORCEWAKE_ALL and
+ * translate it here to the list of available domains.
+ */
+ if (entry->domains == FORCEWAKE_ALL)
+ return dev_priv->uncore.fw_domains;
+
WARN(entry->domains & ~dev_priv->uncore.fw_domains,
"Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
entry->domains & ~dev_priv->uncore.fw_domains, offset);
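The narrowing matters because a gen11 part may have only a subset of the video engines fused in, so the same FORCEWAKE_ALL table entry must resolve to different masks on different SKUs. The snippet below only illustrates that step, assuming a hypothetical SKU exposing one VDBOX and one VEBOX; it is not additional driver code.

/*
 * Illustration of the FORCEWAKE_ALL narrowing, assuming a hypothetical
 * gen11 SKU whose probed mask is render + blitter + vdbox0 + vebox0.
 */
static enum forcewake_domains
example_resolve_domains(struct drm_i915_private *dev_priv,
			enum forcewake_domains entry)
{
	/*
	 * dev_priv->uncore.fw_domains would then contain, e.g.:
	 * FORCEWAKE_RENDER | FORCEWAKE_BLITTER |
	 * FORCEWAKE_MEDIA_VDBOX0 | FORCEWAKE_MEDIA_VEBOX0
	 */
	if (entry == FORCEWAKE_ALL)
		return dev_priv->uncore.fw_domains;

	return entry;
}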
@@ -860,6 +877,14 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
__fwd; \
})
+#define __gen11_fwtable_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (GEN11_NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd; \
+})
+
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
@@ -871,6 +896,20 @@ static const i915_reg_t gen8_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
+static const i915_reg_t gen11_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+ RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
+ RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
+ RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
+ RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
+ RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
+ RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
+ /* TODO: Other registers are not yet used */
+};
+
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -883,14 +922,17 @@ static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
return 0;
}
-static bool is_gen8_shadowed(u32 offset)
-{
- const i915_reg_t *regs = gen8_shadowed_regs;
-
- return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
- mmio_reg_cmp);
+#define __is_genX_shadowed(x) \
+static bool is_gen##x##_shadowed(u32 offset) \
+{ \
+ const i915_reg_t *regs = gen##x##_shadowed_regs; \
+ return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
+ mmio_reg_cmp); \
}
+__is_genX_shadowed(8)
+__is_genX_shadowed(11)
+
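Because the token pasting is easy to misread in diff form, the gen11 instantiation above expands to the following (written out by hand for readability; it adds nothing beyond the macro):

/* Hand expansion of __is_genX_shadowed(11); not extra code in the patch. */
static bool is_gen11_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen11_shadowed_regs;
	return BSEARCH(offset, regs, ARRAY_SIZE(gen11_shadowed_regs),
		       mmio_reg_cmp);
}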
#define __gen8_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
@@ -929,6 +971,14 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
__fwd; \
})
+#define __gen11_fwtable_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
+ __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd; \
+})
+
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
@@ -965,6 +1015,40 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen11_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
+ GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
+ GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+};
+
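As the comment above the table notes, the entries must stay sorted because find_fw_domain() resolves an MMIO offset with a binary search over these ranges. For readers unfamiliar with that pattern, a simplified, self-contained sketch of such a lookup follows; the struct and function names are illustrative, not the driver's.

/*
 * Simplified sketch of a sorted-range lookup, analogous to what
 * find_fw_domain() does over __gen11_fw_ranges. Names and types here
 * are illustrative only.
 */
struct example_fw_range {
	u32 start;
	u32 end;
	unsigned int domains;
};

static unsigned int example_fw_lookup(const struct example_fw_range *ranges,
				      unsigned int count, u32 offset)
{
	unsigned int lo = 0, hi = count;

	while (lo < hi) {			/* binary search over sorted, non-overlapping ranges */
		unsigned int mid = (lo + hi) / 2;

		if (offset > ranges[mid].end)
			lo = mid + 1;
		else if (offset < ranges[mid].start)
			hi = mid;
		else
			return ranges[mid].domains;
	}

	return 0;				/* no forcewake needed for this offset */
}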
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -1095,7 +1179,12 @@ func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
+#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
+__gen11_fwtable_read(8)
+__gen11_fwtable_read(16)
+__gen11_fwtable_read(32)
+__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
@@ -1105,6 +1194,7 @@ __gen6_read(16)
__gen6_read(32)
__gen6_read(64)
+#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
@@ -1181,7 +1271,11 @@ func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, boo
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
+#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
+__gen11_fwtable_write(8)
+__gen11_fwtable_write(16)
+__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
@@ -1192,6 +1286,7 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
+#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
@@ -1240,6 +1335,13 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
+
d->mask = BIT(domain_id);
@@ -1267,7 +1369,34 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ int i;
+
+ dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_ACK_RENDER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_ACK_BLITTER_GEN9);
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ continue;
+
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+ FORCEWAKE_MEDIA_VDBOX_GEN11(i),
+ FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
+ }
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ continue;
+
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+ FORCEWAKE_MEDIA_VEBOX_GEN11(i),
+ FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
+ }
+ } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1422,10 +1551,14 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
- } else {
+ } else if (IS_GEN(dev_priv, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
}
iosf_mbi_register_pmic_bus_access_notifier(
@@ -1522,9 +1655,11 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
+ POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
I915_WRITE_FW(RING_HEAD(base), 0);
I915_WRITE_FW(RING_TAIL(base), 0);
+ POSTING_READ_FW(RING_TAIL(base));
/* The ring must be empty before it is disabled */
I915_WRITE_FW(RING_CTL(base), 0);
@@ -1548,24 +1683,31 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv,
gen3_stop_engine(engine);
}
-static bool i915_reset_complete(struct pci_dev *pdev)
+static bool i915_in_reset(struct pci_dev *pdev)
{
u8 gdrst;
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_STATUS) == 0;
+ return gdrst & GRDOM_RESET_STATUS;
}
static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
+ int err;
- /* assert reset for at least 20 usec */
+ /* Assert reset for at least 20 usec, and wait for acknowledgement. */
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
usleep_range(50, 200);
+ err = wait_for(i915_in_reset(pdev), 500);
+
+ /* Clear the reset request. */
pci_write_config_byte(pdev, I915_GDRST, 0);
+ usleep_range(50, 200);
+ if (!err)
+ err = wait_for(!i915_in_reset(pdev), 500);
- return wait_for(i915_reset_complete(pdev), 500);
+ return err;
}
static bool g4x_reset_complete(struct pci_dev *pdev)
@@ -1874,9 +2016,9 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
if (!i915_modparams.reset)
return NULL;
- if (INTEL_INFO(dev_priv)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return gen8_reset_engines;
- else if (INTEL_INFO(dev_priv)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return gen6_reset_engines;
else if (IS_GEN5(dev_priv))
return ironlake_do_reset;
@@ -1884,7 +2026,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
return g4x_do_reset;
else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
return g33_do_reset;
- else if (INTEL_INFO(dev_priv)->gen >= 3)
+ else if (INTEL_GEN(dev_priv) >= 3)
return i915_do_reset;
else
return NULL;
@@ -1985,7 +2127,9 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (HAS_FWTABLE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
+ } else if (HAS_FWTABLE(dev_priv)) {
fw_domains = __fwtable_reg_read_fw_domains(offset);
} else if (INTEL_GEN(dev_priv) >= 6) {
fw_domains = __gen6_reg_read_fw_domains(offset);
@@ -2006,7 +2150,9 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
+ } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
fw_domains = __fwtable_reg_write_fw_domains(offset);
} else if (IS_GEN8(dev_priv)) {
fw_domains = __gen8_reg_write_fw_domains(offset);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bed019ef000f..dfdf444e4bcc 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -37,17 +37,28 @@ enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
FW_DOMAIN_ID_BLITTER,
FW_DOMAIN_ID_MEDIA,
+ FW_DOMAIN_ID_MEDIA_VDBOX0,
+ FW_DOMAIN_ID_MEDIA_VDBOX1,
+ FW_DOMAIN_ID_MEDIA_VDBOX2,
+ FW_DOMAIN_ID_MEDIA_VDBOX3,
+ FW_DOMAIN_ID_MEDIA_VEBOX0,
+ FW_DOMAIN_ID_MEDIA_VEBOX1,
FW_DOMAIN_ID_COUNT
};
enum forcewake_domains {
- FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
- FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
- FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
- FORCEWAKE_ALL = (FORCEWAKE_RENDER |
- FORCEWAKE_BLITTER |
- FORCEWAKE_MEDIA)
+ FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
+ FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
+ FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
+ FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
+ FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
+ FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
+
+ FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};
struct intel_uncore_funcs {
@@ -198,4 +209,9 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
2, timeout_ms, NULL);
}
+#define raw_reg_read(base, reg) \
+ readl(base + i915_mmio_reg_offset(reg))
+#define raw_reg_write(base, reg, value) \
+ writel(value, base + i915_mmio_reg_offset(reg))
+
#endif /* !__INTEL_UNCORE_H__ */
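The new raw_reg_read()/raw_reg_write() helpers deliberately bypass the forcewake bookkeeping and uncore tracing and act directly on an ioremapped register base, so they are meant for paths that manage forcewake themselves (or touch registers that need none). A hedged usage sketch, with the base pointer and register chosen purely for illustration:

/*
 * Illustration of the raw accessors. "regs" stands for an already
 * ioremapped MMIO base; the register and the XOR are placeholders, and
 * no forcewake handling is shown.
 */
static u32 example_raw_toggle_bit0(void __iomem *regs, i915_reg_t reg)
{
	u32 val = raw_reg_read(regs, reg);

	raw_reg_write(regs, reg, val ^ 1);

	return val;
}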
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index a2632df39173..391f3d9ffdf1 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -129,8 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
i915_gem_object_init(obj, &huge_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->scratch = phys_size;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 2ea69394f428..05bbef363fff 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -178,8 +178,8 @@ huge_pages_object(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &huge_page_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
obj->mm.page_mask = page_mask;
@@ -329,8 +329,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
else
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
return obj;
@@ -964,7 +964,7 @@ static int gpu_write(struct i915_vma *vma,
u32 dword,
u32 value)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *batch;
int flags = 0;
int err;
@@ -975,7 +975,7 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -1003,7 +1003,7 @@ static int gpu_write(struct i915_vma *vma,
reservation_object_unlock(vma->resv);
err_request:
- __i915_add_request(rq, err == 0);
+ __i915_request_add(rq, err == 0);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 7a0d1e17c1ad..340a98c0c804 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -178,7 +178,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
u32 v)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
@@ -191,7 +191,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -199,7 +199,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
@@ -229,7 +229,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 56a803d11916..7ecaed50d0b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -114,7 +114,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
unsigned int flags;
@@ -152,7 +152,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
goto err_vma;
}
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
@@ -180,12 +180,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
return 0;
err_request:
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
err_batch:
i915_vma_unpin(batch);
err_vma:
@@ -215,8 +215,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
}
i915_gem_obj_finish_shmem_access(obj);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+ obj->write_domain = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index e1ddad635d73..ab9d7bee0aae 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -407,7 +407,7 @@ static int igt_evict_contexts(void *arg)
mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
@@ -416,7 +416,7 @@ static int igt_evict_contexts(void *arg)
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
igt_evict_ctl.fail_if_busy = false;
if (IS_ERR(rq)) {
@@ -437,7 +437,7 @@ static int igt_evict_contexts(void *arg)
if (err < 0)
break;
- i915_add_request(rq);
+ i915_request_add(rq);
count++;
err = 0;
} while(1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index d8064276431c..f7dc926f4ef1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -113,8 +113,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
/* Preallocate the "backing storage" */
@@ -927,7 +927,7 @@ static int shrink_boom(struct drm_i915_private *i915,
explode = fake_dma_object(i915, size);
if (IS_ERR(explode)) {
- err = PTR_ERR(purge);
+ err = PTR_ERR(explode);
goto err_purge;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index f32aa6bb79e2..fbdb2419d418 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -212,8 +212,11 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return -EINTR;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
- if (err)
+ if (err) {
+ pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
+ tile->tiling, tile->stride, err);
return err;
+ }
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
@@ -230,13 +233,16 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(view.partial.size > nreal);
err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err)
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n",
+ err);
return err;
+ }
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
- pr_err("Failed to pin partial view: offset=%lu\n",
- page);
+ pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
@@ -246,8 +252,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
io = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(io)) {
- pr_err("Failed to iomap partial view: offset=%lu\n",
- page);
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
return PTR_ERR(io);
}
@@ -430,7 +436,7 @@ out:
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
int err;
@@ -442,14 +448,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
}
i915_vma_move_to_active(vma, rq, 0);
- i915_add_request(rq);
+ i915_request_add(rq);
i915_gem_object_set_active_reference(obj);
i915_vma_unpin(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 088f45bc6199..9c76f0305b6a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,7 +11,7 @@
*/
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
-selftest(requests, i915_gem_request_live_selftests)
+selftest(requests, i915_request_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 19c6fce837df..9a48aa441743 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -16,7 +16,7 @@ selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
selftest(timelines, i915_gem_timeline_mock_selftests)
-selftest(requests, i915_gem_request_mock_selftests)
+selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 647bf2bbd799..94bc2e1898a4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -32,7 +32,7 @@
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
@@ -44,7 +44,7 @@ static int igt_add_request(void *arg)
if (!request)
goto out_unlock;
- i915_add_request(request);
+ i915_request_add(request);
err = 0;
out_unlock:
@@ -56,7 +56,7 @@ static int igt_wait_request(void *arg)
{
const long T = HZ / 4;
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -EINVAL;
/* Submit a request, then wait upon it */
@@ -68,49 +68,49 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
goto out_unlock;
}
- i915_add_request(request);
+ i915_request_add(request);
- if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
goto out_unlock;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
pr_err("request wait timed out!\n");
goto out_unlock;
}
- if (!i915_gem_request_completed(request)) {
+ if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
goto out_unlock;
}
@@ -126,7 +126,7 @@ static int igt_fence_wait(void *arg)
{
const long T = HZ / 4;
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -EINVAL;
/* Submit a request, treat it as a fence and wait upon it */
@@ -145,7 +145,7 @@ static int igt_fence_wait(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- i915_add_request(request);
+ i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
@@ -185,7 +185,7 @@ out_locked:
static int igt_request_rewind(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request, *vip;
+ struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
int err = -EINVAL;
@@ -197,8 +197,8 @@ static int igt_request_rewind(void *arg)
goto err_context_0;
}
- i915_gem_request_get(request);
- i915_add_request(request);
+ i915_request_get(request);
+ i915_request_add(request);
ctx[1] = mock_context(i915, "B");
vip = mock_request(i915->engine[RCS], ctx[1], 0);
@@ -210,35 +210,35 @@ static int igt_request_rewind(void *arg)
/* Simulate preemption by manual reordering */
if (!mock_cancel_request(request)) {
pr_err("failed to cancel request (already executed)!\n");
- i915_add_request(vip);
+ i915_request_add(vip);
goto err_context_1;
}
- i915_gem_request_get(vip);
- i915_add_request(vip);
+ i915_request_get(vip);
+ i915_request_add(vip);
rcu_read_lock();
request->engine->submit_request(request);
rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
- if (i915_wait_request(vip, 0, HZ) == -ETIME) {
+ if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
goto err;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("low priority request already completed\n");
goto err;
}
err = 0;
err:
- i915_gem_request_put(vip);
+ i915_request_put(vip);
mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
- i915_gem_request_put(request);
+ i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
@@ -246,7 +246,7 @@ err_context_0:
return err;
}
-int i915_gem_request_mock_selftests(void)
+int i915_request_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_add_request),
@@ -303,7 +303,7 @@ static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
if (wait_for(intel_engines_are_idle(i915), 10)) {
pr_err("%s(%s): GPU not idle\n", t->func, t->name);
@@ -343,7 +343,7 @@ static int live_nop_request(void *arg)
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long n, prime;
ktime_t times[2] = {};
@@ -355,8 +355,8 @@ static int live_nop_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- request = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request = i915_request_alloc(engine,
+ i915->kernel_context);
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto out_unlock;
@@ -375,9 +375,9 @@ static int live_nop_request(void *arg)
* for latency.
*/
- i915_add_request(request);
+ i915_request_add(request);
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
@@ -447,15 +447,14 @@ err:
return ERR_PTR(err);
}
-static struct drm_i915_gem_request *
+static struct i915_request *
empty_request(struct intel_engine_cs *engine,
struct i915_vma *batch)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err;
- request = i915_gem_request_alloc(engine,
- engine->i915->kernel_context);
+ request = i915_request_alloc(engine, engine->i915->kernel_context);
if (IS_ERR(request))
return request;
@@ -467,7 +466,7 @@ empty_request(struct intel_engine_cs *engine,
goto out_request;
out_request:
- __i915_add_request(request, err == 0);
+ __i915_request_add(request, err == 0);
return err ? ERR_PTR(err) : request;
}
@@ -495,7 +494,7 @@ static int live_empty_request(void *arg)
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long n, prime;
ktime_t times[2] = {};
@@ -509,7 +508,7 @@ static int live_empty_request(void *arg)
err = PTR_ERR(request);
goto out_batch;
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
@@ -523,7 +522,7 @@ static int live_empty_request(void *arg)
goto out_batch;
}
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
@@ -633,7 +632,7 @@ static int live_all_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request[I915_NUM_ENGINES];
+ struct i915_request *request[I915_NUM_ENGINES];
struct i915_vma *batch;
struct live_test t;
unsigned int id;
@@ -658,8 +657,7 @@ static int live_all_engines(void *arg)
}
for_each_engine(engine, i915, id) {
- request[id] = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request[id] = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -680,12 +678,12 @@ static int live_all_engines(void *arg)
}
i915_vma_move_to_active(batch, request[id], 0);
- i915_gem_request_get(request[id]);
- i915_add_request(request[id]);
+ i915_request_get(request[id]);
+ i915_request_add(request[id]);
}
for_each_engine(engine, i915, id) {
- if (i915_gem_request_completed(request[id])) {
+ if (i915_request_completed(request[id])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
@@ -702,7 +700,7 @@ static int live_all_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- timeout = i915_wait_request(request[id],
+ timeout = i915_request_wait(request[id],
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
@@ -712,8 +710,8 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_gem_request_completed(request[id]));
- i915_gem_request_put(request[id]);
+ GEM_BUG_ON(!i915_request_completed(request[id]));
+ i915_request_put(request[id]);
request[id] = NULL;
}
@@ -722,7 +720,7 @@ static int live_all_engines(void *arg)
out_request:
for_each_engine(engine, i915, id)
if (request[id])
- i915_gem_request_put(request[id]);
+ i915_request_put(request[id]);
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
@@ -733,8 +731,8 @@ out_unlock:
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
- struct drm_i915_gem_request *prev = NULL;
+ struct i915_request *request[I915_NUM_ENGINES] = {};
+ struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
struct live_test t;
unsigned int id;
@@ -763,8 +761,7 @@ static int live_sequential_engines(void *arg)
goto out_unlock;
}
- request[id] = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request[id] = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -773,10 +770,10 @@ static int live_sequential_engines(void *arg)
}
if (prev) {
- err = i915_gem_request_await_dma_fence(request[id],
- &prev->fence);
+ err = i915_request_await_dma_fence(request[id],
+ &prev->fence);
if (err) {
- i915_add_request(request[id]);
+ i915_request_add(request[id]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
@@ -794,8 +791,8 @@ static int live_sequential_engines(void *arg)
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
- i915_gem_request_get(request[id]);
- i915_add_request(request[id]);
+ i915_request_get(request[id]);
+ i915_request_add(request[id]);
prev = request[id];
}
@@ -803,7 +800,7 @@ static int live_sequential_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- if (i915_gem_request_completed(request[id])) {
+ if (i915_request_completed(request[id])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
@@ -817,7 +814,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- timeout = i915_wait_request(request[id],
+ timeout = i915_request_wait(request[id],
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
@@ -827,7 +824,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[id]));
}
err = end_live_test(&t);
@@ -849,14 +846,14 @@ out_request:
}
i915_vma_put(request[id]->batch);
- i915_gem_request_put(request[id]);
+ i915_request_put(request[id]);
}
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
-int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+int i915_request_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_request),
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 3f9016466dea..fb74e2cf8a0a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -87,7 +87,7 @@ static int validate_client(struct intel_guc_client *client,
static bool client_doorbell_in_sync(struct intel_guc_client *client)
{
- return doorbell_ok(client->guc, client->doorbell_id);
+ return !client || doorbell_ok(client->guc, client->doorbell_id);
}
/*
@@ -137,7 +137,6 @@ static int igt_guc_clients(void *args)
goto unlock;
}
GEM_BUG_ON(!guc->execbuf_client);
- GEM_BUG_ON(!guc->preempt_client);
err = validate_client(guc->execbuf_client,
GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
@@ -146,16 +145,18 @@ static int igt_guc_clients(void *args)
goto out;
}
- err = validate_client(guc->preempt_client,
- GUC_CLIENT_PRIORITY_KMD_HIGH, true);
- if (err) {
- pr_err("preempt client validation failed\n");
- goto out;
+ if (guc->preempt_client) {
+ err = validate_client(guc->preempt_client,
+ GUC_CLIENT_PRIORITY_KMD_HIGH, true);
+ if (err) {
+ pr_err("preempt client validation failed\n");
+ goto out;
+ }
}
/* each client should now have reserved a doorbell */
if (!has_doorbell(guc->execbuf_client) ||
- !has_doorbell(guc->preempt_client)) {
+ (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
pr_err("guc_clients_create didn't reserve doorbells\n");
err = -EINVAL;
goto out;
@@ -224,7 +225,8 @@ out:
* clients during unload.
*/
destroy_doorbell(guc->execbuf_client);
- destroy_doorbell(guc->preempt_client);
+ if (guc->preempt_client)
+ destroy_doorbell(guc->preempt_client);
guc_clients_destroy(guc);
guc_clients_create(guc);
guc_clients_doorbell_init(guc);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index d1d2c2456f69..df7898c8edcb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -92,13 +92,13 @@ err_ctx:
}
static u64 hws_address(const struct i915_vma *hws,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}
static int emit_recurse_batch(struct hang *h,
- struct drm_i915_gem_request *rq)
+ struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
@@ -204,10 +204,10 @@ unpin_vma:
return err;
}
-static struct drm_i915_gem_request *
+static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
int err;
if (i915_gem_object_is_active(h->obj)) {
@@ -232,21 +232,20 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
h->batch = vaddr;
}
- rq = i915_gem_request_alloc(engine, h->ctx);
+ rq = i915_request_alloc(engine, h->ctx);
if (IS_ERR(rq))
return rq;
err = emit_recurse_batch(h, rq);
if (err) {
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
return ERR_PTR(err);
}
return rq;
}
-static u32 hws_seqno(const struct hang *h,
- const struct drm_i915_gem_request *rq)
+static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
@@ -319,7 +318,7 @@ static void hang_fini(struct hang *h)
flush_test(h->i915, I915_WAIT_LOCKED);
}
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+static bool wait_for_hang(struct hang *h, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
rq->fence.seqno),
@@ -332,7 +331,7 @@ static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
static int igt_hang_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -359,17 +358,17 @@ static int igt_hang_sanitycheck(void *arg)
goto fini;
}
- i915_gem_request_get(rq);
+ i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
- timeout = i915_wait_request(rq,
+ timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
if (timeout < 0) {
err = timeout;
@@ -485,7 +484,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
if (active) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
@@ -495,8 +494,8 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_for_hang(&h, rq)) {
@@ -507,12 +506,12 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
intel_engine_dump(engine, &p,
"%s\n", engine->name);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err = -EIO;
break;
}
- i915_gem_request_put(rq);
+ i915_request_put(rq);
}
engine->hangcheck.stalled = true;
@@ -577,7 +576,7 @@ static int igt_reset_active_engine(void *arg)
static int active_engine(void *data)
{
struct intel_engine_cs *engine = data;
- struct drm_i915_gem_request *rq[2] = {};
+ struct i915_request *rq[2] = {};
struct i915_gem_context *ctx[2];
struct drm_file *file;
unsigned long count = 0;
@@ -606,29 +605,29 @@ static int active_engine(void *data)
while (!kthread_should_stop()) {
unsigned int idx = count++ & 1;
- struct drm_i915_gem_request *old = rq[idx];
- struct drm_i915_gem_request *new;
+ struct i915_request *old = rq[idx];
+ struct i915_request *new;
mutex_lock(&engine->i915->drm.struct_mutex);
- new = i915_gem_request_alloc(engine, ctx[idx]);
+ new = i915_request_alloc(engine, ctx[idx]);
if (IS_ERR(new)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
- rq[idx] = i915_gem_request_get(new);
- i915_add_request(new);
+ rq[idx] = i915_request_get(new);
+ i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
if (old) {
- i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(old);
+ i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(old);
}
}
for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_gem_request_put(rq[count]);
+ i915_request_put(rq[count]);
err_file:
mock_file_free(engine->i915, file);
@@ -692,7 +691,7 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
if (active) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
@@ -702,8 +701,8 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
break;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_for_hang(&h, rq)) {
@@ -714,12 +713,12 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
intel_engine_dump(engine, &p,
"%s\n", engine->name);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err = -EIO;
break;
}
- i915_gem_request_put(rq);
+ i915_request_put(rq);
}
engine->hangcheck.stalled = true;
@@ -814,7 +813,7 @@ static int igt_reset_active_engine_others(void *arg)
return __igt_reset_engine_others(arg, true);
}
-static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
+static u32 fake_hangcheck(struct i915_request *rq)
{
u32 reset_count;
@@ -832,7 +831,7 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
static int igt_wait_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
@@ -856,8 +855,8 @@ static int igt_wait_reset(void *arg)
goto fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -875,9 +874,9 @@ static int igt_wait_reset(void *arg)
reset_count = fake_hangcheck(rq);
- timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10);
+ timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
if (timeout < 0) {
- pr_err("i915_wait_request failed on a stuck request: err=%ld\n",
+ pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
timeout);
err = timeout;
goto out_rq;
@@ -891,7 +890,7 @@ static int igt_wait_reset(void *arg)
}
out_rq:
- i915_gem_request_put(rq);
+ i915_request_put(rq);
fini:
hang_fini(&h);
unlock:
@@ -922,7 +921,7 @@ static int igt_reset_queue(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -935,12 +934,12 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- i915_gem_request_get(prev);
- __i915_add_request(prev, true);
+ i915_request_get(prev);
+ __i915_request_add(prev, true);
count = 0;
do {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int reset_count;
rq = hang_create_request(&h, engine);
@@ -949,8 +948,8 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -960,8 +959,8 @@ static int igt_reset_queue(void *arg)
intel_engine_dump(prev->engine, &p,
"%s\n", prev->engine->name);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -980,8 +979,8 @@ static int igt_reset_queue(void *arg)
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
prev->fence.error);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
@@ -989,21 +988,21 @@ static int igt_reset_queue(void *arg)
if (rq->fence.error) {
pr_err("Fence error status not zero [%d] after unrelated reset\n",
rq->fence.error);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
- i915_gem_request_put(prev);
+ i915_request_put(prev);
prev = rq;
count++;
} while (time_before(jiffies, end_time));
@@ -1012,7 +1011,7 @@ static int igt_reset_queue(void *arg)
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- i915_gem_request_put(prev);
+ i915_request_put(prev);
err = flush_test(i915, I915_WAIT_LOCKED);
if (err)
@@ -1036,7 +1035,7 @@ static int igt_handle_error(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
struct hang h;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_gpu_state *error;
int err;
@@ -1060,8 +1059,8 @@ static int igt_handle_error(void *arg)
goto err_fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1098,7 +1097,7 @@ static int igt_handle_error(void *arg)
}
err_request:
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err_fini:
hang_fini(&h);
err_unlock:
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 2f6367643171..f76f2597df5c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -61,20 +61,30 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
- const i915_reg_t *reg = gen8_shadowed_regs;
- unsigned int i;
+ struct {
+ const i915_reg_t *regs;
+ unsigned int size;
+ } reg_lists[] = {
+ { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
+ { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ };
+ const i915_reg_t *reg;
+ unsigned int i, j;
s32 prev;
- for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
- u32 offset = i915_mmio_reg_offset(*reg);
+ for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
+ reg = reg_lists[j].regs;
+ for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
+ u32 offset = i915_mmio_reg_offset(*reg);
- if (prev >= (s32)offset) {
- pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
- __func__, i, offset, prev);
- return -EINVAL;
- }
+ if (prev >= (s32)offset) {
+ pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
+ __func__, i, offset, prev);
+ return -EINVAL;
+ }
- prev = offset;
+ prev = offset;
+ }
}
return 0;
@@ -90,6 +100,7 @@ int intel_uncore_mock_selftests(void)
{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
+ { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
};
int err, i;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 55c0e2c15782..78a89efa1119 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -81,7 +81,7 @@ static void mock_context_unpin(struct intel_engine_cs *engine,
i915_gem_context_put(ctx);
}
-static int mock_request_alloc(struct drm_i915_gem_request *request)
+static int mock_request_alloc(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
@@ -91,24 +91,24 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
return 0;
}
-static int mock_emit_flush(struct drm_i915_gem_request *request,
+static int mock_emit_flush(struct i915_request *request,
unsigned int flags)
{
return 0;
}
-static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+static void mock_emit_breadcrumb(struct i915_request *request,
u32 *flags)
{
}
-static void mock_submit_request(struct drm_i915_gem_request *request)
+static void mock_submit_request(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
- i915_gem_request_submit(request);
+ i915_request_submit(request);
GEM_BUG_ON(!request->global_seqno);
spin_lock_irq(&engine->hw_lock);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 1bc61f3f76fc..e6d4b882599a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -43,7 +43,7 @@ void mock_device_flush(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
mock_engine_flush(engine);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
}
static void mock_device_release(struct drm_device *dev)
@@ -243,16 +243,10 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->kernel_context)
goto err_engine;
- i915->preempt_context = mock_context(i915, NULL);
- if (!i915->preempt_context)
- goto err_kernel_context;
-
WARN_ON(i915_gemfs_init(i915));
return i915;
-err_kernel_context:
- i915_gem_context_put(i915->kernel_context);
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 8097e3693ec4..0dc29e242597 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -25,16 +25,16 @@
#include "mock_engine.h"
#include "mock_request.h"
-struct drm_i915_gem_request *
+struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
unsigned long delay)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = i915_gem_request_alloc(engine, context);
+ request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
@@ -44,7 +44,7 @@ mock_request(struct intel_engine_cs *engine,
return &mock->base;
}
-bool mock_cancel_request(struct drm_i915_gem_request *request)
+bool mock_cancel_request(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
@@ -57,7 +57,7 @@ bool mock_cancel_request(struct drm_i915_gem_request *request)
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
- i915_gem_request_unsubmit(request);
+ i915_request_unsubmit(request);
return was_queued;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 4dea74c8e96d..995fb728380c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -27,20 +27,20 @@
#include <linux/list.h>
-#include "../i915_gem_request.h"
+#include "../i915_request.h"
struct mock_request {
- struct drm_i915_gem_request base;
+ struct i915_request base;
struct list_head link;
unsigned long delay;
};
-struct drm_i915_gem_request *
+struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
unsigned long delay);
-bool mock_cancel_request(struct drm_i915_gem_request *request);
+bool mock_cancel_request(struct i915_request *request);
#endif /* !__MOCK_REQUEST__ */