author    Dave Airlie <airlied@redhat.com>    2016-03-08 10:50:37 +1000
committer Dave Airlie <airlied@redhat.com>    2016-03-08 10:50:37 +1000
commit    507d44a9e1bb01661c75b88fd866d2461ab41c9c (patch)
tree      75ae546cddd02379de3eca3497a042e9148828f1
parent    d8c61663c7a8e5041f5c2a4b142f24d0d7861ad6 (diff)
parent    5790ff742b1feee62f60a95f4caf78827f656f58 (diff)
Merge tag 'drm-intel-next-2016-02-29' of git://anongit.freedesktop.org/drm-intel into drm-next
- fbc by default on hsw&bdw, thanks to great work by Paulo!
- psr by default hsw,bdw,vlv&chv, thanks to great work by Rodrigo!
- fixes to hw state readout vs. rpm issues (Imre)
- dc3 fixes&improvements (Mika), this and the above already cherry-picked to -fixes
- first part of locking fixes from Tvrtko
- proper atomic code for load detect (Maarten)
- more rpm fixes from Ville
- more atomic work from Maarten

* tag 'drm-intel-next-2016-02-29' of git://anongit.freedesktop.org/drm-intel: (63 commits)
  drm/i915: Update DRIVER_DATE to 20160229
  drm/i915: Execlists cannot pin a context without the object
  drm/i915: Reduce the pointer dance of i915_is_ggtt()
  drm/i915: Rename vma->*_list to *_link for consistency
  drm/i915: Balance assert_rpm_wakelock_held() for !IS_ENABLED(CONFIG_PM)
  drm/i915/lrc: Only set RS ctx enable in ctx control reg if there is a RS
  drm/i915/gen9: Set value of Indirect Context Offset based on gen version
  drm/i915: Remove update_sprite_watermarks.
  drm/i915: Kill off intel_crtc->atomic.wait_vblank, v6.
  drm/i915: Unify power domain handling.
  drm/i915: Pass crtc state to modeset_get_crtc_power_domains.
  drm/i915: Add for_each_pipe_masked()
  drm/i915: Make sure pipe interrupts are processed before turning off power well on BDW+
  drm/i915: synchronize_irq() before turning off disp2d power well on VLV/CHV
  drm/i915: Skip PIPESTAT reads from irq handler on VLV/CHV when power well is down
  drm/i915/gen9: Write dc state debugmask bits only once
  drm/i915/gen9: Extend dmc debug mask to include cores
  drm/i915/gen9: Verify and enforce dc6 state writes
  drm/i915/gen9: Check for DC state mismatch
  drm/i915/fbc: enable FBC by default on HSW and BDW
  ...
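A pattern that recurs throughout the hunks below is the replacement of intel_display_power_is_enabled() checks with the reference-counted intel_display_power_get_if_enabled()/intel_display_power_put() pair, so a power well cannot be switched off between the check and the register readout. The following is a minimal illustrative sketch of that pattern, not code from this merge; the helpers, POWER_DOMAIN_PIPE(), i915_reg_t and I915_READ() are taken from the diff, while the function itself is hypothetical:

static u32 example_read_pipe_reg(struct drm_i915_private *dev_priv,
				 enum pipe pipe, i915_reg_t reg)
{
	enum intel_display_power_domain power_domain = POWER_DOMAIN_PIPE(pipe);
	u32 val = 0;

	/* Take a power reference only if the well is already up, so it
	 * cannot be powered down while the register is being read. */
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return 0;

	val = I915_READ(reg);

	/* Balance the reference once the readout is done. */
	intel_display_power_put(dev_priv, power_domain);

	return val;
}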
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 56
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 99
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 112
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 622
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 190
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 11
42 files changed, 930 insertions, 610 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 4c59793c4ccb..20a5d0455e19 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -45,3 +45,14 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
+
+config DRM_I915_USERPTR
+ bool "Always enable userptr support"
+ depends on DRM_I915
+ select MMU_NOTIFIER
+ default y
+ help
+ This option selects CONFIG_MMU_NOTIFIER if it isn't already
+ selected to enable full userptr support.
+
+ If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ec0c2a05eed6..a0f1bd711b53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
u64 size = 0;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
- drm_mm_node_allocated(&vma->node))
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
@@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->pin_count > 0)
pin_count++;
}
@@ -164,14 +163,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
- i915_is_ggtt(vma->vm) ? "g" : "pp",
+ vma->is_ggtt ? "g" : "pp",
vma->node.start, vma->node.size);
- if (i915_is_ggtt(vma->vm))
- seq_printf(m, ", type: %u)", vma->ggtt_view.type);
- else
- seq_puts(m, ")");
+ if (vma->is_ggtt)
+ seq_printf(m, ", type: %u", vma->ggtt_view.type);
+ seq_puts(m, ")");
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -230,7 +228,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(vma, head, mm_list) {
+ list_for_each_entry(vma, head, vm_link) {
seq_printf(m, " ");
describe_obj(m, vma->obj);
seq_printf(m, "\n");
@@ -342,13 +340,13 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->shared += obj->base.size;
if (USES_FULL_PPGTT(obj->base.dev)) {
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
struct i915_hw_ppgtt *ppgtt;
if (!drm_mm_node_allocated(&vma->node))
continue;
- if (i915_is_ggtt(vma->vm)) {
+ if (vma->is_ggtt) {
stats->global += obj->base.size;
continue;
}
@@ -454,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->active_list, mm_list);
+ count_vmas(&vm->active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->inactive_list, mm_list);
+ count_vmas(&vm->inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -825,8 +823,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe))) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -840,6 +841,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Pipe %c IER:\t%08x\n",
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+ intel_display_power_put(dev_priv, power_domain);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -4004,6 +4007,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
+ enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -4014,7 +4018,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (pipe_crc->source && source)
return -EINVAL;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -4031,7 +4036,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
- return ret;
+ goto out;
/* none -> real source transition */
if (source) {
@@ -4043,8 +4048,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
sizeof(pipe_crc->entries[0]),
GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
+ if (!entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4100,7 +4107,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
hsw_enable_ips(crtc);
}
- return 0;
+ ret = 0;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2df2fac04708..1c6d227aae7c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -444,8 +444,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
- i915_gem_cleanup_engines(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
intel_guc_ucode_fini(dev);
@@ -1256,8 +1256,8 @@ int i915_driver_unload(struct drm_device *dev)
intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
- i915_gem_cleanup_engines(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44912ecebc1a..20e82008b8b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -603,13 +603,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
- /*
- * Disable CRTCs directly since we want to preserve sw state
- * for _thaw. Also, power gate the CRTC power wells.
- */
- drm_modeset_lock_all(dev);
intel_display_suspend(dev);
- drm_modeset_unlock_all(dev);
intel_dp_mst_suspend(dev);
@@ -764,9 +758,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- drm_modeset_lock_all(dev);
intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
intel_dp_mst_resume(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 64cfd446453c..10480939159c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160214"
+#define DRIVER_DATE "20160229"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -261,6 +261,9 @@ struct i915_hotplug {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
@@ -746,6 +749,7 @@ struct intel_csr {
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
+ uint32_t dc_state;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -1848,6 +1852,7 @@ struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
+ struct drm_atomic_state *modeset_restore_state;
struct list_head vm_list; /* Global list of all address spaces */
struct i915_gtt gtt; /* VM representing the global address space */
@@ -3058,7 +3063,7 @@ int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_engines(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3151,18 +3156,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-static inline bool i915_is_ggtt(struct i915_address_space *vm)
-{
- struct i915_address_space *ggtt =
- &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
- return vm == ggtt;
-}
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
WARN_ON(i915_is_ggtt(vm));
-
return container_of(vm, struct i915_hw_ppgtt, base);
}
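The for_each_pipe_masked() macro added above replaces the open-coded per-pipe bit tests (compare the i915_irq.c hunk later in this diff). A hedged usage sketch, mirroring gen8_irq_power_well_pre_disable(); the wrapper function name is illustrative, and the real callers hold dev_priv->irq_lock around the loop:

static void example_reset_masked_pipes(struct drm_i915_private *dev_priv,
				       unsigned int pipe_mask)
{
	enum pipe pipe;

	/* Visits only the pipes whose bit is set in pipe_mask. */
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
}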
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index de57e7f0be0f..3d31d3ac589e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,10 +138,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+ list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+ list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
@@ -272,7 +272,7 @@ drop_pages(struct drm_i915_gem_object *obj)
int ret;
drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;
@@ -489,7 +489,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
*needs_clflush = 0;
- if (!obj->base.filp)
+ if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
return -EINVAL;
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -2416,7 +2416,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
- list_move_tail(&vma->mm_list, &vma->vm->active_list);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void
@@ -2454,9 +2454,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
list_move_tail(&obj->global_list,
&to_i915(obj->base.dev)->mm.bound_list);
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (!list_empty(&vma->mm_list))
- list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!list_empty(&vma->vm_link))
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}
i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2970,11 +2970,9 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
- unsigned long flags;
-
- spin_lock_irqsave(&ring->execlist_lock, flags);
+ spin_lock_irq(&ring->execlist_lock);
idle &= list_empty(&ring->execlist_queue);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ spin_unlock_irq(&ring->execlist_lock);
intel_execlists_retire_requests(ring);
}
@@ -3319,7 +3317,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (list_empty(&vma->vma_link))
+ if (list_empty(&vma->obj_link))
return 0;
if (!drm_mm_node_allocated(&vma->node)) {
@@ -3338,8 +3336,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
return ret;
}
- if (i915_is_ggtt(vma->vm) &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
@@ -3353,8 +3350,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
vma->vm->unbind_vma(vma);
vma->bound = 0;
- list_del_init(&vma->mm_list);
- if (i915_is_ggtt(vma->vm)) {
+ list_del_init(&vma->vm_link);
+ if (vma->is_ggtt) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
@@ -3611,7 +3608,7 @@ search_free:
goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&vma->mm_list, &vm->inactive_list);
+ list_add_tail(&vma->vm_link, &vm->inactive_list);
return vma;
@@ -3776,7 +3773,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
- list_move_tail(&vma->mm_list,
+ list_move_tail(&vma->vm_link,
&to_i915(obj->base.dev)->gtt.base.inactive_list);
return 0;
@@ -3811,7 +3808,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -3874,7 +3871,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
*/
}
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -3884,7 +3881,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
}
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
@@ -4558,7 +4555,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
int ret;
vma->pin_count = 0;
@@ -4615,7 +4612,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->vm == vm)
return vma;
@@ -4632,7 +4629,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
@@ -4641,19 +4638,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
void i915_gem_vma_destroy(struct i915_vma *vma)
{
- struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);
/* Keep the vma as a placeholder in the execbuffer reservation lists */
if (!list_empty(&vma->exec_list))
return;
- vm = vma->vm;
+ if (!vma->is_ggtt)
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
- if (!i915_is_ggtt(vm))
- i915_ppgtt_put(i915_vm_to_ppgtt(vm));
-
- list_del(&vma->vma_link);
+ list_del(&vma->obj_link);
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
@@ -4913,7 +4907,7 @@ i915_gem_init_hw(struct drm_device *dev)
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
- i915_gem_cleanup_engines(dev);
+ i915_gem_cleanup_ringbuffer(dev);
goto out;
}
@@ -4926,7 +4920,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req);
- i915_gem_cleanup_engines(dev);
+ i915_gem_cleanup_ringbuffer(dev);
goto out;
}
@@ -4934,7 +4928,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) {
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req);
- i915_gem_cleanup_engines(dev);
+ i915_gem_cleanup_ringbuffer(dev);
goto out;
}
@@ -5009,7 +5003,7 @@ out_unlock:
}
void
-i915_gem_cleanup_engines(struct drm_device *dev)
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
@@ -5018,14 +5012,13 @@ i915_gem_cleanup_engines(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
- if (i915.enable_execlists) {
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev);
- }
+ if (i915.enable_execlists)
+ /*
+ * Neither the BIOS, ourselves or any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev);
}
static void
@@ -5204,8 +5197,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
+ list_for_each_entry(vma, &o->vma_list, obj_link) {
+ if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
@@ -5223,7 +5216,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
@@ -5237,8 +5230,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
{
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
+ list_for_each_entry(vma, &o->vma_list, obj_link) {
+ if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -5254,7 +5247,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
@@ -5267,7 +5260,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, obj_link)
if (drm_mm_node_allocated(&vma->node))
return true;
@@ -5284,8 +5277,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
+ list_for_each_entry(vma, &o->vma_list, obj_link) {
+ if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
@@ -5297,7 +5290,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->pin_count > 0)
return true;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 83a097c94911..5dd84e148bba 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
return;
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
- mm_list) {
+ vm_link) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
break;
}
@@ -855,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (!contexts_enabled(dev))
return -ENODEV;
+ if (args->pad != 0)
+ return -EINVAL;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -878,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
+ if (args->pad != 0)
+ return -EINVAL;
+
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d320c9..ea1f8d1bd228 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
search_again:
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+ list_for_each_entry(vma, &vm->inactive_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
@@ -125,7 +125,7 @@ search_again:
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(vma, &vm->active_list, mm_list) {
+ list_for_each_entry(vma, &vm->active_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
@@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
WARN_ON(!list_empty(&vm->active_list));
}
- list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8fd00d279447..1328bc5021b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -668,7 +668,7 @@ need_reloc_mappable(struct i915_vma *vma)
if (entry->relocation_count == 0)
return false;
- if (!i915_is_ggtt(vma->vm))
+ if (!vma->is_ggtt)
return false;
/* See also use_cpu_reloc() */
@@ -687,8 +687,7 @@ eb_vma_misplaced(struct i915_vma *vma)
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
- WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
- !i915_is_ggtt(vma->vm));
+ WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9127f8f3561c..49e4f26b79d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2758,7 +2758,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
}
/* Clear any non-preallocated blocks */
@@ -3198,6 +3198,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
}
gtt->base.dev = dev;
+ gtt->base.is_ggtt = true;
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
@@ -3258,7 +3259,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != vm)
continue;
@@ -3314,19 +3315,20 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&vma->vma_link);
- INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->vm_link);
+ INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
+ vma->is_ggtt = i915_is_ggtt(vm);
if (i915_is_ggtt(vm))
vma->ggtt_view = *ggtt_view;
-
- list_add_tail(&vma->vma_link, &obj->vma_list);
- if (!i915_is_ggtt(vm))
+ else
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+
return vma;
}
@@ -3598,13 +3600,9 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return 0;
if (vma->bound == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma->vm,
- vma->node.start,
- vma->node.size,
- VM_TO_TRACE_NAME(vma->vm));
-
/* XXX: i915_vma_pin() will fix this +- hack */
vma->pin_count++;
+ trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 66a6da2396a2..8774f1ba46e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -183,6 +183,7 @@ struct i915_vma {
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
unsigned int bound : 4;
+ bool is_ggtt : 1;
/**
* Support different GGTT views into the same object.
@@ -194,9 +195,9 @@ struct i915_vma {
struct i915_ggtt_view ggtt_view;
/** This object's place on the active/inactive lists */
- struct list_head mm_list;
+ struct list_head vm_link;
- struct list_head vma_link; /* Link in the object's VMA list */
+ struct list_head obj_link; /* Link in the object's VMA list */
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@@ -275,6 +276,8 @@ struct i915_address_space {
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
+ bool is_ggtt;
+
struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -330,6 +333,8 @@ struct i915_address_space {
u32 flags);
};
+#define i915_is_ggtt(V) ((V)->is_ggtt)
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -418,7 +423,7 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
uint32_t pde_shift)
{
- const uint64_t mask = ~((1 << pde_shift) - 1);
+ const uint64_t mask = ~((1ULL << pde_shift) - 1);
uint64_t end;
WARN_ON(length == 0);
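With the vma->is_ggtt flag and the i915_is_ggtt() macro introduced above, GGTT checks no longer have to chase vm->dev->dev_private on every call, and the renamed obj_link/vm_link list heads disambiguate which list a VMA sits on. A small illustrative walker in the spirit of the debugfs hunk earlier in this diff (the function name is hypothetical):

static u64 example_ggtt_bound_size(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	u64 size = 0;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* vma->is_ggtt replaces the old i915_is_ggtt(vma->vm) lookup. */
		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}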
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 58c1e592bbdb..d3c473ffb90a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -52,7 +52,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int count = 0;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
if (vma->pin_count)
@@ -176,7 +176,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
/* For the unbound phase, this should be a no-op! */
list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
+ &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ba1a00d815d3..2e6e9fb6f80d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -638,6 +638,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
+ lockdep_assert_held(&dev->struct_mutex);
+
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
stolen_offset, gtt_offset, size);
@@ -695,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt->inactive_list);
}
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 7107f2fd38f5..4b09c840d493 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -78,7 +78,7 @@ static void cancel_userptr(struct work_struct *work)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
int ret = i915_vma_unbind(vma);
WARN_ON(ret && ret != -EIO);
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 978c026963b8..831895b8cb75 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -736,7 +736,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma;
int i = 0;
- list_for_each_entry(vma, head, mm_list) {
+ list_for_each_entry(vma, head, vm_link) {
capture_bo(err++, vma);
if (++i == count)
break;
@@ -759,7 +759,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
if (err == last)
break;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
}
@@ -1127,12 +1127,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
int i;
i = 0;
- list_for_each_entry(vma, &vm->active_list, mm_list)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == vm && vma->pin_count > 0)
i++;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 25a89373df63..d1a46ef5ab3f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1651,6 +1651,12 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
int pipe;
spin_lock(&dev_priv->irq_lock);
+
+ if (!dev_priv->display_irqs_enabled) {
+ spin_unlock(&dev_priv->irq_lock);
+ return;
+ }
+
for_each_pipe(dev_priv, pipe) {
i915_reg_t reg;
u32 mask, iir_bit = 0;
@@ -3343,21 +3349,28 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ enum pipe pipe;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ dev_priv->de_irq_mask[pipe],
+ ~dev_priv->de_irq_mask[pipe] | extra_ier);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask)
+{
+ enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
- if (pipe_mask & 1 << PIPE_A)
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
- dev_priv->de_irq_mask[PIPE_A],
- ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
- if (pipe_mask & 1 << PIPE_B)
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
- dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
- if (pipe_mask & 1 << PIPE_C)
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
- dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ synchronize_irq(dev_priv->dev->irq);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8b9f36814165..278c9c40c2e0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -38,7 +38,7 @@ struct i915_params i915 __read_mostly = {
.enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
- .enable_psr = 0,
+ .enable_psr = -1,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
@@ -49,7 +49,6 @@ struct i915_params i915 __read_mostly = {
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
- .disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
@@ -92,7 +91,7 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
-module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
@@ -102,7 +101,7 @@ MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
-module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -128,9 +127,10 @@ MODULE_PARM_DESC(enable_execlists,
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR "
- "(0=disabled [default], 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode)");
+ "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
@@ -164,12 +164,9 @@ MODULE_PARM_DESC(invert_brightness,
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
-module_param_named(disable_display, i915.disable_display, bool, 0600);
+module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
-module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
-MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
-
module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 529929073120..bd5026b15d3e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -56,7 +56,6 @@ struct i915_params {
bool load_detect_test;
bool reset;
bool disable_display;
- bool disable_vtd_wa;
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 144586ee74d5..f76cbf3e5d1e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3296,19 +3296,20 @@ enum skl_disp_power_wells {
#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
/*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@@ -7567,6 +7568,7 @@ enum skl_disp_power_wells {
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
#define DC_STATE_DEBUG _MMIO(0x45520)
+#define DC_STATE_DEBUG_MASK_CORES (1<<0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 52b2d409945d..fa09e5581137 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -175,35 +175,24 @@ TRACE_EVENT(i915_vma_unbind,
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
-#define VM_TO_TRACE_NAME(vm) \
- (i915_is_ggtt(vm) ? "G" : \
- "P")
-
-DECLARE_EVENT_CLASS(i915_va,
- TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
- TP_ARGS(vm, start, length, name),
+TRACE_EVENT(i915_va_alloc,
+ TP_PROTO(struct i915_vma *vma),
+ TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u64, start)
__field(u64, end)
- __string(name, name)
),
TP_fast_assign(
- __entry->vm = vm;
- __entry->start = start;
- __entry->end = start + length - 1;
- __assign_str(name, name);
+ __entry->vm = vma->vm;
+ __entry->start = vma->node.start;
+ __entry->end = vma->node.start + vma->node.size - 1;
),
- TP_printk("vm=%p (%s), 0x%llx-0x%llx",
- __entry->vm, __get_str(name), __entry->start, __entry->end)
-);
-
-DEFINE_EVENT(i915_va, i915_va_alloc,
- TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
- TP_ARGS(vm, start, length, name)
+ TP_printk("vm=%p (%c), 0x%llx-0x%llx",
+ __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
);
DECLARE_EVENT_CLASS(i915_px_entry,
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 4625f8a9ba12..8e579a8505ac 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -97,6 +97,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->wm_changed = false;
+ crtc_state->fb_changed = false;
return &crtc_state->base;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ad5dfabc452e..505fc5cf26f8 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
@@ -206,9 +213,7 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
static void intel_enable_crt(struct intel_encoder *encoder)
{
- struct intel_crt *crt = intel_encoder_to_crt(encoder);
-
- intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+ intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
}
static enum drm_mode_status
@@ -473,11 +478,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt)
+intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
@@ -646,7 +650,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else if (INTEL_INFO(dev)->gen < 4)
- status = intel_crt_load_detect(crt);
+ status = intel_crt_load_detect(crt,
+ to_intel_crtc(connector->state->crtc)->pipe);
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 2a7ec3141c8d..902054efb902 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -220,19 +220,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
* Everytime display comes back from low power state this function is called to
* copy the firmware from internal memory to registers.
*/
-void intel_csr_load_program(struct drm_i915_private *dev_priv)
+bool intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
- return;
+ return false;
}
if (!dev_priv->csr.dmc_payload) {
DRM_ERROR("Tried to program CSR with empty payload\n");
- return;
+ return false;
}
fw_size = dev_priv->csr.dmc_fw_size;
@@ -243,6 +243,10 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
I915_WRITE(dev_priv->csr.mmioaddr[i],
dev_priv->csr.mmiodata[i]);
}
+
+ dev_priv->csr.dc_state = 0;
+
+ return true;
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cdf2e14aa45d..21a9b83f3bfc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1913,13 +1913,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
- return false;
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+ ret = false;
+ goto out;
+ }
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
@@ -1931,23 +1934,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
- return (type == DRM_MODE_CONNECTOR_HDMIA);
+ ret = type == DRM_MODE_CONNECTOR_HDMIA;
+ break;
case TRANS_DDI_MODE_SELECT_DP_SST:
- if (type == DRM_MODE_CONNECTOR_eDP)
- return true;
- return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ ret = type == DRM_MODE_CONNECTOR_eDP ||
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ break;
+
case TRANS_DDI_MODE_SELECT_DP_MST:
/* if the transcoder is in MST state then
* connector isn't connected */
- return false;
+ ret = false;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
- return (type == DRM_MODE_CONNECTOR_VGA);
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ break;
default:
- return false;
+ ret = false;
+ break;
}
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -1959,15 +1972,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
- return false;
+ goto out;
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -1985,25 +2001,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
break;
}
- return true;
- } else {
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+ ret = true;
- if ((tmp & TRANS_DDI_PORT_MASK)
- == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
- return false;
+ goto out;
+ }
- *pipe = i;
- return true;
- }
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ goto out;
+
+ *pipe = i;
+ ret = true;
+
+ goto out;
}
}
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
- return false;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2449,12 +2472,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & WRPLL_PLL_ENABLE;
}
@@ -2464,12 +2489,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & SPLL_PLL_ENABLE;
}
@@ -2586,16 +2613,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
- return false;
+ goto out;
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2605,8 +2635,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
+ ret = true;
- return true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2873,13 +2907,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
val = I915_READ(BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
- return false;
+ goto out;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2926,7 +2963,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3061,11 +3103,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
{
u32 temp;
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
return true;
}
+
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 836bbdc239b6..8b7b8b64b008 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1343,18 +1343,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
+ enum intel_display_power_domain power_domain;
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
- cur_state = false;
- } else {
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
+
+ intel_display_power_put(dev_priv, power_domain);
+ } else {
+ cur_state = false;
}
I915_STATE_WARN(cur_state != state,
@@ -2551,12 +2554,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
return false;
+ mutex_lock(&dev->struct_mutex);
+
obj = i915_gem_object_create_stolen_for_preallocated(dev,
base_aligned,
base_aligned,
size_aligned);
- if (!obj)
+ if (!obj) {
+ mutex_unlock(&dev->struct_mutex);
return false;
+ }
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
@@ -2569,12 +2576,12 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.modifier[0] = fb->modifier[0];
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
- mutex_lock(&dev->struct_mutex);
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
+
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
@@ -4785,9 +4792,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
- if (atomic->wait_vblank)
- intel_wait_for_vblank(dev, crtc->pipe);
-
intel_frontbuffer_flip(dev, atomic->fb_bits);
crtc->wm.cxsr_allowed = true;
@@ -5301,31 +5305,37 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
}
}
-static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned long mask;
- enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (!crtc->state->active)
+ if (!crtc_state->base.active)
return 0;
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (intel_crtc->config->pch_pfit.enabled ||
- intel_crtc->config->pch_pfit.force_thru)
+ if (crtc_state->pch_pfit.enabled ||
+ crtc_state->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
mask |= BIT(intel_display_port_power_domain(intel_encoder));
+ }
return mask;
}
-static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+static unsigned long
+modeset_get_crtc_power_domains(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5333,7 +5343,8 @@ static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
unsigned long domains, new_domains, old_domains;
old_domains = intel_crtc->enabled_power_domains;
- intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+ intel_crtc->enabled_power_domains = new_domains =
+ get_crtc_power_domains(crtc, crtc_state);
domains = new_domains & ~old_domains;
@@ -5352,31 +5363,6 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
intel_display_power_put(dev_priv, domain);
}
-static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long put_domains[I915_MAX_PIPES] = {};
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (needs_modeset(crtc->state))
- put_domains[to_intel_crtc(crtc)->pipe] =
- modeset_get_crtc_power_domains(crtc);
- }
-
- if (dev_priv->display.modeset_commit_cdclk &&
- intel_state->dev_cdclk != dev_priv->cdclk_freq)
- dev_priv->display.modeset_commit_cdclk(state);
-
- for (i = 0; i < I915_MAX_PIPES; i++)
- if (put_domains[i])
- modeset_put_power_domains(dev_priv, put_domains[i]);
-}
-
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
@@ -6039,8 +6025,7 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
return 144000;
}
-/* Compute the max pixel clock for new configuration. Uses atomic state if
- * that's non-NULL, look at current state otherwise. */
+/* Compute the max pixel clock for the new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -6063,9 +6048,6 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
intel_state->min_pixclk[i] = pixclk;
}
- if (!intel_state->active_crtcs)
- return 0;
-
for_each_pipe(dev_priv, pipe)
max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
@@ -6393,55 +6375,16 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
*/
int intel_display_suspend(struct drm_device *dev)
{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
- struct drm_crtc *crtc;
- unsigned crtc_mask = 0;
- int ret = 0;
-
- if (WARN_ON(!ctx))
- return 0;
-
- lockdep_assert_held(&ctx->ww_ctx);
- state = drm_atomic_state_alloc(dev);
- if (WARN_ON(!state))
- return -ENOMEM;
-
- state->acquire_ctx = ctx;
- state->allow_modeset = true;
-
- for_each_crtc(dev, crtc) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state, crtc);
-
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
- goto free;
-
- if (!crtc_state->active)
- continue;
-
- crtc_state->active = false;
- crtc_mask |= 1 << drm_crtc_index(crtc);
- }
-
- if (crtc_mask) {
- ret = drm_atomic_commit(state);
-
- if (!ret) {
- for_each_crtc(dev, crtc)
- if (crtc_mask & (1 << drm_crtc_index(crtc)))
- crtc->state->active = true;
-
- return ret;
- }
- }
+ int ret;
-free:
+ state = drm_atomic_helper_suspend(dev);
+ ret = PTR_ERR_OR_ZERO(state);
if (ret)
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
- drm_atomic_state_free(state);
+ else
+ dev_priv->modeset_restore_state = state;
return ret;
}
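A minimal sketch (not part of the patch) of how the saved state round-trips, based on the intel_display_resume() hunk later in this diff; error handling is elided and example_suspend_resume_cycle() is a hypothetical caller:

	/* Suspend stashes the duplicated atomic state; resume commits it back. */
	static void example_suspend_resume_cycle(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);

		/* drm_atomic_helper_suspend() disables all CRTCs and returns a
		 * duplicated state, kept in dev_priv->modeset_restore_state. */
		WARN_ON(intel_display_suspend(dev));

		/* ... system sleep ... */

		/* intel_display_resume() consumes modeset_restore_state and
		 * commits it under its own acquire context. */
		intel_display_resume(dev);

		WARN_ON(dev_priv->modeset_restore_state != NULL);
	}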
@@ -8181,18 +8124,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
+
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
@@ -8272,7 +8219,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9376,18 +9328,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
@@ -9450,7 +9405,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9716,9 +9676,6 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
intel_state->min_pixclk[i] = pixel_rate;
}
- if (!intel_state->active_crtcs)
- return 0;
-
for_each_pipe(dev_priv, pipe)
max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
@@ -9982,12 +9939,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum intel_display_power_domain pfit_domain;
+ enum intel_display_power_domain power_domain;
+ unsigned long power_domain_mask;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ power_domain_mask = BIT(power_domain);
+
+ ret = false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -10014,13 +9976,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
- return false;
+ power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ goto out;
+ power_domain_mask |= BIT(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
haswell_get_ddi_port_state(crtc, pipe_config);
@@ -10030,14 +9993,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
skl_init_scalers(dev, crtc, pipe_config);
}
- pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
- if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ power_domain_mask |= BIT(power_domain);
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -10055,7 +10018,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- return true;
+ ret = true;
+
+out:
+ for_each_power_domain(power_domain, power_domain_mask)
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
@@ -10376,6 +10345,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
+ drm_framebuffer_reference(fb);
return fb;
#else
return NULL;
@@ -10431,7 +10401,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
- struct drm_atomic_state *state = NULL;
+ struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret, i = -1;
@@ -10440,6 +10410,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
connector->base.id, connector->name,
encoder->base.id, encoder->name);
+ old->restore_state = NULL;
+
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
@@ -10456,24 +10428,15 @@ retry:
*/
/* See if we already have a CRTC for this connector */
- if (encoder->crtc) {
- crtc = encoder->crtc;
+ if (connector->state->crtc) {
+ crtc = connector->state->crtc;
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail;
- ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
- if (ret)
- goto fail;
-
- old->dpms_mode = connector->dpms;
- old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
- if (connector->dpms != DRM_MODE_DPMS_ON)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
-
- return true;
+ goto found;
}
/* Find an unused one (if possible) */
@@ -10481,8 +10444,15 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (possible_crtc->state->enable)
+
+ ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+ if (ret)
+ goto fail;
+
+ if (possible_crtc->state->enable) {
+ drm_modeset_unlock(&possible_crtc->mutex);
continue;
+ }
crtc = possible_crtc;
break;
@@ -10496,23 +10466,22 @@ retry:
goto fail;
}
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- goto fail;
+found:
+ intel_crtc = to_intel_crtc(crtc);
+
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail;
- intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = connector->dpms;
- old->load_detect_temp = true;
- old->release_fb = NULL;
-
state = drm_atomic_state_alloc(dev);
- if (!state)
- return false;
+ restore_state = drm_atomic_state_alloc(dev);
+ if (!state || !restore_state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
state->acquire_ctx = ctx;
+ restore_state->acquire_ctx = ctx;
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
@@ -10520,7 +10489,9 @@ retry:
goto fail;
}
- connector_state->crtc = crtc;
+ ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+ if (ret)
+ goto fail;
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state)) {
@@ -10544,7 +10515,6 @@ retry:
if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
@@ -10556,15 +10526,28 @@ retry:
if (ret)
goto fail;
- drm_mode_copy(&crtc_state->base.mode, mode);
+ drm_framebuffer_unreference(fb);
+
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ if (ret)
+ goto fail;
+
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+ if (!ret)
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+ if (!ret)
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+ goto fail;
+ }
if (drm_atomic_commit(state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
- if (old->release_fb)
- old->release_fb->funcs->destroy(old->release_fb);
goto fail;
}
- crtc->primary->crtc = crtc;
+
+ old->restore_state = restore_state;
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -10572,7 +10555,8 @@ retry:
fail:
drm_atomic_state_free(state);
- state = NULL;
+ drm_atomic_state_free(restore_state);
+ restore_state = state = NULL;
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -10586,65 +10570,24 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_atomic_state *state;
- struct drm_connector_state *connector_state;
- struct intel_crtc_state *crtc_state;
+ struct drm_atomic_state *state = old->restore_state;
int ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
- if (old->load_detect_temp) {
- state = drm_atomic_state_alloc(dev);
- if (!state)
- goto fail;
-
- state->acquire_ctx = ctx;
-
- connector_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(connector_state))
- goto fail;
-
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state))
- goto fail;
-
- connector_state->crtc = NULL;
-
- crtc_state->base.enable = crtc_state->base.active = false;
-
- ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
- 0, 0);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
- if (ret)
- goto fail;
-
- if (old->release_fb) {
- drm_framebuffer_unregister_private(old->release_fb);
- drm_framebuffer_unreference(old->release_fb);
- }
-
+ if (!state)
return;
- }
-
- /* Switch crtc and encoder back off if necessary */
- if (old->dpms_mode != DRM_MODE_DPMS_ON)
- connector->funcs->dpms(connector, old->dpms_mode);
- return;
-fail:
- DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
- drm_atomic_state_free(state);
+ ret = drm_atomic_commit(state);
+ if (ret) {
+ DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+ drm_atomic_state_free(state);
+ }
}
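A hypothetical caller (not from the patch) showing the simplified load-detect lifecycle after this rework; the actual hardware detection sequence is elided:

	static enum drm_connector_status
	example_load_detect(struct drm_connector *connector,
			    struct drm_display_mode *mode,
			    struct drm_modeset_acquire_ctx *ctx)
	{
		struct intel_load_detect_pipe tmp;
		enum drm_connector_status status = connector_status_unknown;

		if (intel_get_load_detect_pipe(connector, mode, &tmp, ctx)) {
			/* ... probe the hardware while the temporary pipe is live,
			 * then set status accordingly ... */
			intel_release_load_detect_pipe(connector, &tmp, ctx);
		}

		return status;
	}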
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -10996,6 +10939,12 @@ static bool page_flip_finished(struct intel_crtc *crtc)
return true;
/*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+
+ /*
* A DSPSURFLIVE check isn't enough in case the mmio and CS flips
* used the same base address. In that case the mmio flip might
* have completed, but the CS hasn't even executed the flip yet.
@@ -11839,7 +11788,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
- int i = drm_plane_index(plane);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
@@ -11872,6 +11820,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
if (!was_visible && !visible)
return 0;
+ if (fb != old_plane_state->base.fb)
+ pipe_config->fb_changed = true;
+
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
@@ -11886,11 +11837,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
pipe_config->wm_changed = true;
/* must disable cxsr around plane enable/disable */
- if (plane->type != DRM_PLANE_TYPE_CURSOR) {
- if (is_crtc_enabled)
- intel_crtc->atomic.wait_vblank = true;
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
pipe_config->disable_cxsr = true;
- }
} else if (intel_wm_need_update(plane, plane_state)) {
pipe_config->wm_changed = true;
}
@@ -11904,14 +11852,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
intel_crtc->atomic.post_enable_primary = turn_on;
intel_crtc->atomic.update_fbc = true;
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (turn_on && IS_BROADWELL(dev))
- intel_crtc->atomic.wait_vblank = true;
-
break;
case DRM_PLANE_TYPE_CURSOR:
break;
@@ -11924,13 +11864,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state)) {
- to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
- } else if (turn_off && !mode_changed) {
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |=
- 1 << i;
- }
+ !needs_scaling(old_plane_state))
+ pipe_config->disable_lp_wm = true;
break;
}
@@ -13098,8 +13033,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll_config *shared_dpll = NULL;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *intel_crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
@@ -13108,21 +13041,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
return;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- int dpll;
-
- intel_crtc = to_intel_crtc(crtc);
- intel_crtc_state = to_intel_crtc_state(crtc_state);
- dpll = intel_crtc_state->shared_dpll;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
- if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+ if (!needs_modeset(crtc_state))
continue;
- intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
+ to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
+
+ if (old_dpll == DPLL_ID_PRIVATE)
+ continue;
if (!shared_dpll)
shared_dpll = intel_atomic_get_shared_dpll_state(state);
- shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+ shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
}
}
@@ -13258,6 +13191,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
if (ret < 0)
return ret;
+
+ DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
+ intel_state->cdclk, intel_state->dev_cdclk);
} else
to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
@@ -13463,6 +13399,71 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
}
+static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ unsigned crtc_mask)
+{
+ unsigned last_vblank_count[I915_MAX_PIPES];
+ enum pipe pipe;
+ int ret;
+
+ if (!crtc_mask)
+ return;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (!((1 << pipe) & crtc_mask))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (WARN_ON(ret != 0)) {
+ crtc_mask &= ~(1 << pipe);
+ continue;
+ }
+
+ last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+ }
+
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ long lret;
+
+ if (!((1 << pipe) & crtc_mask))
+ continue;
+
+ lret = wait_event_timeout(dev->vblank[pipe].queue,
+ last_vblank_count[pipe] !=
+ drm_crtc_vblank_count(crtc),
+ msecs_to_jiffies(50));
+
+ WARN_ON(!lret);
+
+ drm_crtc_vblank_put(crtc);
+ }
+}
+
+static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
+{
+ /* fb updated, need to unpin old fb */
+ if (crtc_state->fb_changed)
+ return true;
+
+ /* wm changed, need a vblank before the final watermarks */
+ if (crtc_state->wm_changed)
+ return true;
+
+ /*
+ * cxsr is re-enabled after vblank.
+ * This is already handled by crtc_state->wm_changed,
+ * but added for clarity.
+ */
+ if (crtc_state->disable_cxsr)
+ return true;
+
+ return false;
+}
+
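Sketch only, mirroring how the intel_atomic_commit() hunks further down consume the two helpers above; example_wait_for_pending_vblanks() is not part of the patch:

	static void example_wait_for_pending_vblanks(struct drm_device *dev,
						     struct drm_i915_private *dev_priv,
						     struct drm_atomic_state *state)
	{
		struct drm_crtc *crtc;
		struct drm_crtc_state *crtc_state;
		unsigned crtc_vblank_mask = 0;
		int i;

		/* collect the pipes that still need a vblank after the update */
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			struct intel_crtc_state *pipe_config =
				to_intel_crtc_state(crtc->state);

			if (pipe_config->base.active && needs_vblank_wait(pipe_config))
				crtc_vblank_mask |= 1 << i;
		}

		/* then wait for all of them in a single pass */
		if (!state->legacy_cursor_update)
			intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
	}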
/**
* intel_atomic_commit - commit validated state object
* @dev: DRM device
@@ -13489,6 +13490,8 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_crtc *crtc;
int ret = 0, i;
bool hw_check = intel_state->modeset;
+ unsigned long put_domains[I915_MAX_PIPES] = {};
+ unsigned crtc_vblank_mask = 0;
ret = intel_atomic_prepare_commit(dev, state, async);
if (ret) {
@@ -13504,11 +13507,22 @@ static int intel_atomic_commit(struct drm_device *dev,
sizeof(intel_state->min_pixclk));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (needs_modeset(crtc->state) ||
+ to_intel_crtc_state(crtc->state)->update_pipe) {
+ hw_check = true;
+
+ put_domains[to_intel_crtc(crtc)->pipe] =
+ modeset_get_crtc_power_domains(crtc,
+ to_intel_crtc_state(crtc->state));
+ }
+
if (!needs_modeset(crtc->state))
continue;
@@ -13541,32 +13555,25 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_commit(state);
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- modeset_update_crtc_power_domains(state);
+
+ if (dev_priv->display.modeset_commit_cdclk &&
+ intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ dev_priv->display.modeset_commit_cdclk(state);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
- bool update_pipe = !modeset &&
- to_intel_crtc_state(crtc->state)->update_pipe;
- unsigned long put_domains = 0;
-
- if (modeset)
- intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
+ bool update_pipe = !modeset && pipe_config->update_pipe;
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
}
- if (update_pipe) {
- put_domains = modeset_get_crtc_power_domains(crtc);
-
- /* make sure intel_modeset_check_state runs */
- hw_check = true;
- }
-
if (!modeset)
intel_pre_plane_update(to_intel_crtc_state(crtc_state));
@@ -13577,18 +13584,24 @@ static int intel_atomic_commit(struct drm_device *dev,
(crtc->state->planes_changed || update_pipe))
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
- if (put_domains)
- modeset_put_power_domains(dev_priv, put_domains);
-
- intel_post_plane_update(intel_crtc);
-
- if (modeset)
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+ if (pipe_config->base.active && needs_vblank_wait(pipe_config))
+ crtc_vblank_mask |= 1 << i;
}
/* FIXME: add subpixel order */
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ if (!state->legacy_cursor_update)
+ intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ intel_post_plane_update(to_intel_crtc(crtc));
+
+ if (put_domains[i])
+ modeset_put_power_domains(dev_priv, put_domains[i]);
+ }
+
+ if (intel_state->modeset)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
@@ -13670,7 +13683,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -13678,6 +13691,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & DPLL_VCO_ENABLE;
}
@@ -15493,6 +15508,17 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
return false;
}
+static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_connector *connector;
+
+ for_each_connector_on_encoder(dev, &encoder->base, connector)
+ return true;
+
+ return false;
+}
+
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -15603,7 +15629,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
- bool active = false;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
@@ -15611,15 +15636,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
- for_each_intel_connector(dev, connector) {
- if (connector->base.encoder != &encoder->base)
- continue;
-
- active = true;
- break;
- }
-
- if (active && !has_active_crtc) {
+ if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -15674,10 +15691,12 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
static bool primary_get_hw_state(struct intel_plane *plane)
@@ -15905,7 +15924,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
unsigned long put_domains;
- put_domains = modeset_get_crtc_power_domains(&crtc->base);
+ put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -15916,54 +15935,65 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
void intel_display_resume(struct drm_device *dev)
{
- struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
- struct intel_connector *conn;
- struct intel_plane *plane;
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ struct drm_modeset_acquire_ctx ctx;
int ret;
+ bool setup = false;
- if (!state)
- return;
-
- state->acquire_ctx = dev->mode_config.acquire_ctx;
+ dev_priv->modeset_restore_state = NULL;
- /* preserve complete old state, including dpll */
- intel_atomic_get_shared_dpll_state(state);
+ /*
+ * This is a kludge because with real atomic modeset mode_config.mutex
+ * won't be taken. Unfortunately some probed state like
+ * audio_codec_enable is still protected by mode_config.mutex, so lock
+ * it here for now.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(&ctx, 0);
- for_each_crtc(dev, crtc) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state, crtc);
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
- goto err;
+ if (ret == 0 && !setup) {
+ setup = true;
- /* force a restore */
- crtc_state->mode_changed = true;
+ intel_modeset_setup_hw_state(dev);
+ i915_redisable_vga(dev);
}
- for_each_intel_plane(dev, plane) {
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
- if (ret)
- goto err;
- }
+ if (ret == 0 && state) {
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- for_each_intel_connector(dev, conn) {
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
- if (ret)
- goto err;
+ state->acquire_ctx = &ctx;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore the
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
+ }
+
+ ret = drm_atomic_commit(state);
}
- intel_modeset_setup_hw_state(dev);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
- i915_redisable_vga(dev);
- ret = drm_atomic_commit(state);
- if (!ret)
- return;
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&dev->mode_config.mutex);
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_free(state);
+ if (ret) {
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_free(state);
+ }
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -15972,9 +16002,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int ret;
- mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
- mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
@@ -16054,9 +16082,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_overlay(dev);
- mutex_lock(&dev->struct_mutex);
intel_cleanup_gt_powersave(dev);
- mutex_unlock(&dev->struct_mutex);
intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 23599c36503f..cbc06596659a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2356,15 +2356,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
- return false;
+ goto out;
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2375,7 +2378,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
- return true;
+ ret = true;
+
+ goto out;
}
}
@@ -2387,7 +2392,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
*pipe = PORT_TO_PIPE(tmp);
}
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_dp_get_config(struct intel_encoder *encoder,
@@ -4487,20 +4497,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
u32 bit;
switch (port->port) {
case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@@ -4548,12 +4558,12 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
- if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_digital_port_connected(dev_priv, port);
+ else if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3cae3768ea37..4c027d69fac9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -379,6 +379,7 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool wm_changed; /* watermarks are updated */
+ bool fb_changed; /* fb on any of the planes is changed */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -547,9 +548,7 @@ struct intel_crtc_atomic_commit {
/* Sleepable operations to perform after commit */
unsigned fb_bits;
- bool wait_vblank;
bool post_enable_primary;
- unsigned update_sprite_watermarks;
/* Sleepable operations to perform before and after commit */
bool update_fbc;
@@ -910,9 +909,7 @@ struct intel_unpin_work {
};
struct intel_load_detect_pipe {
- struct drm_framebuffer *release_fb;
- bool load_detect_temp;
- int dpms_mode;
+ struct drm_atomic_state *restore_state;
};
static inline struct intel_encoder *
@@ -995,6 +992,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -1227,7 +1226,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
-void intel_csr_load_program(struct drm_i915_private *);
+bool intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
/* intel_dp.c */
@@ -1436,6 +1435,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
@@ -1522,6 +1523,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
enable_rpm_wakeref_asserts(dev_priv)
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 378f879f4015..01b8e9f4c272 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -634,7 +634,6 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 val;
DRM_DEBUG_KMS("\n");
@@ -642,9 +641,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
intel_dsi_clear_device_ready(encoder);
- val = I915_READ(DSPCLK_GATE_D);
- val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ if (!IS_BROXTON(dev_priv)) {
+ u32 val;
+
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+ }
drm_panel_unprepare(intel_dsi->panel);
@@ -664,13 +667,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
+ bool ret;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +697,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
- return true;
+ ret = true;
+
+ goto out;
}
}
}
+out:
+ intel_display_power_put(dev_priv, power_domain);
- return false;
+ return ret;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
@@ -775,10 +785,9 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- unsigned int bpp = intel_crtc->config->pipe_bpp;
+ unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -849,7 +858,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
- unsigned int bpp = intel_crtc->config->pipe_bpp;
+ unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index de7be7f3fb42..92f39227b361 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,6 +34,8 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
+int dsi_pixel_format_bpp(int pixel_format);
+
struct intel_dsi_host;
struct intel_dsi {
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 787f01c63984..7f145b4fec6a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -440,10 +440,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
- if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
- bits_per_pixel = 18;
- else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
- bits_per_pixel = 16;
+ bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index bb5e95a1a453..70883c54cb0a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,7 +30,7 @@
#include "i915_drv.h"
#include "intel_dsi.h"
-static int dsi_pixel_format_bpp(int pixel_format)
+int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 3614a951736b..0f0492f4a357 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -823,13 +823,15 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
+ bool enable_by_default = IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv);
if (intel_vgpu_active(dev_priv->dev)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}
- if (i915.enable_fbc < 0) {
+ if (i915.enable_fbc < 0 && !enable_by_default) {
fbc->no_fbc_reason = "disabled per chip default";
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 09840f4380f9..97a91e631915 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -406,8 +406,8 @@ retry:
continue;
}
- encoder = connector->encoder;
- if (!encoder || WARN_ON(!encoder->crtc)) {
+ encoder = connector->state->best_encoder;
+ if (!encoder || WARN_ON(!connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
@@ -420,7 +420,7 @@ retry:
num_connectors_enabled++;
- new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+ new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
/*
* Make sure we're not trying to drive multiple connectors
@@ -466,17 +466,22 @@ retry:
* usually contains. But since our current
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly.
+ *
+ * This is crtc->mode and not crtc->state->mode for the
+ * fastboot check to work correctly. crtc_state->mode has
+ * I915_MODE_FLAG_INHERITED set, which we clear to force a
+ * full state check.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
- modes[i] = &encoder->crtc->mode;
+ modes[i] = &connector->state->crtc->mode;
}
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
- pipe_name(to_intel_crtc(encoder->crtc)->pipe),
- encoder->crtc->base.id,
+ pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
+ connector->state->crtc->base.id,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 3accd914490f..82a3c03fbc0e 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -199,7 +199,7 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
* the value matches either of two values representing completion
* of the GuC boot process.
*
- * This is used for polling the GuC status in a wait_for_atomic()
+ * This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
@@ -259,14 +259,14 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
/*
- * Spin-wait for the DMA to complete & the GuC to start up.
+ * Wait for the DMA to complete & the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
- ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
+ ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
I915_READ(DMA_CTRL), status);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index edb7e901ba4a..80b44c054087 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a03646e343d..6a978ce80244 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -223,7 +223,8 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
-#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
@@ -1144,10 +1145,6 @@ void intel_lr_context_unpin(struct intel_context *ctx,
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
-
- if (WARN_ON_ONCE(!ctx_obj))
- return;
-
if (--ctx->engine[engine->id].pin_count == 0) {
kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
@@ -2317,6 +2314,27 @@ make_rpcs(struct drm_device *dev)
return rpcs;
}
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+{
+ u32 indirect_ctx_offset;
+
+ switch (INTEL_INFO(ring->dev)->gen) {
+ default:
+ MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+ /* fall through */
+ case 9:
+ indirect_ctx_offset =
+ GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
+ case 8:
+ indirect_ctx_offset =
+ GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
+ }
+
+ return indirect_ctx_offset;
+}
+
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@@ -2360,7 +2378,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE));
+ (HAS_RESOURCE_STREAMER(dev) ?
+ CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
/* Ring buffer start address is not known until the buffer is pinned.
@@ -2389,7 +2408,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
- CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+ intel_lr_indirect_ctx_offset(ring) << 6;
reg_state[CTX_BB_PER_CTX_PTR+1] =
(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 811ddf7799f0..30a8403a8f4f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,22 +76,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 379eabe093cb..347d4df49a9b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2851,7 +2851,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
memset(ddb, 0, sizeof(*ddb));
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
for_each_plane(dev_priv, pipe, plane) {
@@ -2863,6 +2866,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
@@ -4116,11 +4121,13 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
spin_lock_irq(&mchdev_lock);
+ rgvmodectl = I915_READ(MEMMODECTL);
+
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -5229,8 +5236,6 @@ static void cherryview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 32*1024;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
@@ -5252,7 +5257,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 24*1024;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ mutex_lock(&dev->struct_mutex);
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
@@ -5280,7 +5285,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pctx = i915_gem_object_create_stolen(dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
- return;
+ goto out;
}
pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
@@ -5289,6 +5294,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
+ mutex_unlock(&dev->struct_mutex);
}
static void valleyview_cleanup_pctx(struct drm_device *dev)
@@ -5298,7 +5304,7 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+ drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
@@ -6241,8 +6247,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
return;
if (IS_IRONLAKE_M(dev)) {
- mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
+ mutex_lock(&dev->struct_mutex);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 4ab757947f15..0b42ada338c8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -778,6 +778,15 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
+ /* Per-platform default */
+ if (i915.enable_psr == -1) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ i915.enable_psr = 1;
+ else
+ i915.enable_psr = 0;
+ }
+
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
/* HSW and BDW require workarounds that we don't implement. */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 133321a5b3d0..45ce45a5e122 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -746,9 +746,9 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
ret = i915_gem_render_state_init(req);
if (ret)
- DRM_ERROR("init render state: %d\n", ret);
+ return ret;
- return ret;
+ return 0;
}
static int wa_add(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index bbca527184d0..4172e73212cd 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -284,6 +284,13 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
1 << PIPE_C | 1 << PIPE_B);
}
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv))
+ gen8_irq_power_well_pre_disable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -309,6 +316,14 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
}
+static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (power_well->data == SKL_DISP_PW_2)
+ gen8_irq_power_well_pre_disable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@@ -334,6 +349,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
} else {
if (enable_requested) {
+ hsw_power_well_pre_disable(dev_priv);
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
@@ -456,20 +472,61 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}
-static void gen9_set_dc_state_debugmask_memory_up(
- struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ uint32_t val, mask;
+
+ mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+ if (IS_BROXTON(dev_priv))
+ mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
- if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
- val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+ if ((val & mask) != mask) {
+ val |= mask;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
}
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ I915_WRITE(DC_STATE_EN, state);
+
+ /* It has been observed that disabling the dc6 state sometimes
+ * doesn't stick and the DMC keeps returning the old value. Re-read
+ * enough times to confirm the write stuck, and force a rewrite until
+ * we are confident the state is exactly what we want.
+ */
+ do {
+ v = I915_READ(DC_STATE_EN);
+
+ if (v != state) {
+ I915_WRITE(DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the time we need just one rewrite; avoid spamming the log */
+ if (rewrites > 1)
+ DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
@@ -488,16 +545,21 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
state = DC_STATE_EN_UPTO_DC5;
- if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
- gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->csr.dc_state)
+ DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
+
val &= ~mask;
val |= state;
- I915_WRITE(DC_STATE_EN, val);
- POSTING_READ(DC_STATE_EN);
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->csr.dc_state = val & mask;
}
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -663,6 +725,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
state_mask = SKL_POWER_WELL_STATE(power_well->data);
is_enabled = tmp & state_mask;
+ if (!enable && enable_requested)
+ skl_power_well_pre_disable(dev_priv, power_well);
+
if (enable) {
if (!enable_requested) {
WARN((tmp & state_mask) &&
@@ -941,6 +1006,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
+ /* make sure we're done processing display irqs */
+ synchronize_irq(dev_priv->dev->irq);
+
vlv_power_sequencer_reset(dev_priv);
}
@@ -1435,6 +1503,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
chv_set_pipe_power_well(dev_priv, power_well, false);
}
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+}
+
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -1450,24 +1534,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
intel_runtime_pm_get(dev_priv);
- power_domains = &dev_priv->power_domains;
+ mutex_lock(&power_domains->lock);
+
+ __intel_display_power_get_domain(dev_priv, domain);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain only if the domain
+ * (and hence all its parents) is already enabled, and keeps it powered up for
+ * as long as the reference is held; it never powers the domain up itself.
+ * Users should only grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ *
+ * Returns: True if the reference was obtained, false otherwise.
+ */
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool is_enabled;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return false;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
}
- power_domains->domain_use_count[domain]++;
-
mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled)
+ intel_runtime_pm_put(dev_priv);
+
+ return is_enabled;
}
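To illustrate the kernel-doc above, a hypothetical caller (sketch only, not taken from this patch) would do its hardware readout only when the domain is already powered, and pair the conditional get with intel_display_power_put():

/* Hypothetical usage sketch: read out state guarded by a display power
 * domain without ever powering the domain up. POWER_DOMAIN_PIPE_A is
 * just an example domain. */
static void example_readout(struct drm_i915_private *dev_priv)
{
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		return;	/* domain is off: nothing to read out */

	/* ... access registers that live in POWER_DOMAIN_PIPE_A ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}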
/**
@@ -2028,8 +2141,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
+ gen9_set_dc_state_debugmask(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -2239,6 +2352,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
}
/**
+ * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference if the device is
+ * already in use and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: True if the runtime pm reference was acquired, false otherwise.
+ */
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ int ret = pm_runtime_get_if_in_use(device);
+
+ /*
+ * If runtime PM is disabled by the RPM core we get an -EINVAL
+ * return value; in that case the caller is not supposed to have
+ * called this function at all, since the power state is undefined.
+ * At the moment this applies to the late/early system
+ * suspend/resume handlers.
+ */
+ WARN_ON_ONCE(ret < 0);
+ if (ret <= 0)
+ return false;
+ }
+
+ atomic_inc(&dev_priv->pm.wakeref_count);
+ assert_rpm_wakelock_held(dev_priv);
+
+ return true;
+}
+
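Similarly, the runtime-pm variant is meant to be paired with intel_runtime_pm_put(); a hedged usage sketch (illustration only, not part of this patch) might look like:

/* Hypothetical usage sketch: perform optional work only while the
 * device is already awake, i.e. without triggering a runtime resume. */
static void example_opportunistic_work(struct drm_i915_private *dev_priv)
{
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;	/* device is (or is about to be) suspended */

	/* ... touch hardware that requires the device to be powered ... */

	intel_runtime_pm_put(dev_priv);
}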
+/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d21f75bda96e..6745bad5bff0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,10 +1182,9 @@ static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
- struct drm_encoder *encoder = &intel_tv->base.base;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = connector->state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
@@ -1234,8 +1233,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- intel_wait_for_vblank(intel_tv->base.base.dev,
- to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1265,8 +1263,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(intel_tv->base.base.dev,
- to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {