author     Rodrigo Vivi <rodrigo.vivi@intel.com>  2024-09-17 19:49:44 -0400
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>  2024-09-17 19:49:44 -0400
commit     089726a48f6f9ca2ff05e929ee238fcd49199090 (patch)
tree       76f329112a99b835762a1d4ac12eca10a00b6e8e
parent     1926272ee33ecea7ed1c5c92b4bd7910bfdfafbd (diff)
2024y-09m-17d-23h-48m-33s UTC: drm-tip rerere cache update
git version 2.46.0
-rw-r--r--  rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/postimage   471
-rw-r--r--  rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/preimage    488
-rw-r--r--  rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/postimage  1395
-rw-r--r--  rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/preimage   1547
-rw-r--r--  rr-cache/c3af3f3a26e05878b3dc9cf670c390ed4180ae9f/preimage   1766
5 files changed, 2725 insertions(+), 2942 deletions(-)
diff --git a/rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/postimage b/rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/postimage
new file mode 100644
index 000000000000..2b39878cf670
--- /dev/null
+++ b/rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/postimage
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_display.h"
+#include "regs/xe_regs.h"
+
+#include <linux/fb.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "soc/intel_dram.h"
+#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
+#include "intel_acpi.h"
+#include "intel_audio.h"
+#include "intel_bw.h"
+#include "intel_display.h"
+#include "intel_display_driver.h"
+#include "intel_display_irq.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dp.h"
+#include "intel_encoder.h"
+#include "intel_fbdev.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "intel_opregion.h"
+#include "xe_module.h"
+
+/* Xe device functions */
+
+static bool has_display(struct xe_device *xe)
+{
+ return HAS_DISPLAY(xe);
+}
+
+/**
+ * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
+ * early on
+ * @pdev: PCI device
+ *
+ * Returns: true if probe needs to be deferred, false otherwise
+ */
+bool xe_display_driver_probe_defer(struct pci_dev *pdev)
+{
+ if (!xe_modparam.probe_display)
+ return 0;
+
+ return intel_display_driver_probe_defer(pdev);
+}
+
+/**
+ * xe_display_driver_set_hooks - Add driver flags and hooks for display
+ * @driver: DRM device driver
+ *
+ * Set features and function hooks in @driver that are needed for driving the
+ * display IP. This sets the driver's capability of driving display, regardless
+ * if the device has it enabled
+ */
+void xe_display_driver_set_hooks(struct drm_driver *driver)
+{
+ if (!xe_modparam.probe_display)
+ return;
+
+ driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
+}
+
+static void unset_display_features(struct xe_device *xe)
+{
+ xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+}
+
+static void display_destroy(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ destroy_workqueue(xe->display.hotplug.dp_wq);
+}
+
+/**
+ * xe_display_create - create display struct
+ * @xe: XE device instance
+ *
+ * Initialize all fields used by the display part.
+ *
+ * TODO: once everything can be inside a single struct, make the struct opaque
+ * to the rest of xe and return it to be xe->display.
+ *
+ * Returns: 0 on success
+ */
+int xe_display_create(struct xe_device *xe)
+{
+ spin_lock_init(&xe->display.fb_tracking.lock);
+
+ xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+
+ return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
+}
+
+static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_power_domains_cleanup(xe);
+}
+
+int xe_display_init_nommio(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return 0;
+
+ /* Fake uncore lock */
+ spin_lock_init(&xe->uncore.lock);
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(xe);
+
+ return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
+}
+
+static void xe_display_fini_noirq(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_remove_noirq(xe);
+ intel_opregion_cleanup(display);
+}
+
+int xe_display_init_noirq(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+ int err;
+
+ if (!xe->info.probe_display)
+ return 0;
+
+ intel_display_driver_early_probe(xe);
+
+ /* Early display init.. */
+ intel_opregion_setup(display);
+
+ /*
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
+ */
+ intel_dram_detect(xe);
+
+ intel_bw_init_hw(xe);
+
+ intel_display_device_info_runtime_init(xe);
+
+ err = intel_display_driver_probe_noirq(xe);
+ if (err) {
+ intel_opregion_cleanup(display);
+ return err;
+ }
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
+}
+
+static void xe_display_fini_noaccel(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_remove_nogem(xe);
+}
+
+int xe_display_init_noaccel(struct xe_device *xe)
+{
+ int err;
+
+ if (!xe->info.probe_display)
+ return 0;
+
+ err = intel_display_driver_probe_nogem(xe);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
+}
+
+int xe_display_init(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return 0;
+
+ return intel_display_driver_probe(xe);
+}
+
+void xe_display_fini(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_hpd_poll_fini(xe);
+
+ intel_hdcp_component_fini(xe);
+ intel_audio_deinit(xe);
+}
+
+void xe_display_register(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_register(xe);
+ intel_register_dsm_handler();
+ intel_power_domains_enable(xe);
+}
+
+void xe_display_unregister(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_unregister_dsm_handler();
+ intel_power_domains_disable(xe);
+ intel_display_driver_unregister(xe);
+}
+
+void xe_display_driver_remove(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_remove(xe);
+}
+
+/* IRQ-related functions */
+
+void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (master_ctl & DISPLAY_IRQ)
+ gen11_display_irq_handler(xe);
+}
+
+void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ if (gu_misc_iir & GU_MISC_GSE)
+ intel_opregion_asle_intr(display);
+}
+
+void xe_display_irq_reset(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ gen11_display_irq_reset(xe);
+}
+
+void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (gt->info.id == XE_GT0)
+ gen11_de_irq_postinstall(xe);
+}
+
+static bool suspend_to_idle(void)
+{
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ return true;
+#endif
+ return false;
+}
+
+static void xe_display_flush_cleanup_work(struct xe_device *xe)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&xe->drm, crtc) {
+ struct drm_crtc_commit *commit;
+
+ spin_lock(&crtc->base.commit_lock);
+ commit = list_first_entry_or_null(&crtc->base.commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->base.commit_lock);
+
+ if (commit) {
+ wait_for_completion(&commit->cleanup_done);
+ drm_crtc_commit_put(commit);
+ }
+ }
+}
+
+/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
+static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+{
+ struct intel_display *display = &xe->display;
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.probe_display)
+ return;
+
+ /*
+ * We do a lot of poking in a lot of registers, make sure they work
+ * properly.
+ */
+ intel_power_domains_disable(xe);
+ if (!runtime)
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+
+ if (!runtime && has_display(xe)) {
+ drm_kms_helper_poll_disable(&xe->drm);
+ intel_display_driver_disable_user_access(xe);
+ intel_display_driver_suspend(xe);
+ }
+
+ xe_display_flush_cleanup_work(xe);
+
+ intel_hpd_cancel_work(xe);
+
+ if (!runtime && has_display(xe)) {
+ intel_display_driver_suspend_access(xe);
+ intel_encoder_suspend_all(&xe->display);
+ }
+
+ intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
+
+ intel_dmc_suspend(display);
+}
+
+void xe_display_pm_suspend(struct xe_device *xe)
+{
+ __xe_display_pm_suspend(xe, false);
+}
+
+void xe_display_pm_shutdown(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_shutdown(xe);
+}
+
+void xe_display_pm_shutdown_noirq(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_shutdown_noirq(xe);
+}
+
+void xe_display_pm_shutdown_noaccel(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_shutdown_nogem(xe);
+}
+
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed)
+ __xe_display_pm_suspend(xe, true);
+
+ intel_hpd_poll_enable(xe);
+}
+
+void xe_display_pm_suspend_late(struct xe_device *xe)
+{
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.probe_display)
+ return;
+
+ intel_power_domains_suspend(xe, s2idle);
+
+ intel_display_power_suspend_late(xe);
+}
+
+void xe_display_pm_resume_early(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_power_resume_early(xe);
+
+ intel_power_domains_resume(xe);
+}
+
+static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_dmc_resume(display);
+
+ if (has_display(xe))
+ drm_mode_config_reset(&xe->drm);
+
+ intel_display_driver_init_hw(xe);
+
+ if (!runtime)
+ intel_display_driver_resume(xe);
+}
+
+void xe_display_pm_resume(struct xe_device *xe)
+{
+ __xe_display_pm_resume(xe, false);
+}
+
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_hpd_poll_disable(xe);
+
+ if (xe->d3cold.allowed)
+ __xe_display_pm_resume(xe, true);
+}
+
+
+static void display_device_remove(struct drm_device *dev, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ intel_display_device_remove(xe);
+}
+
+int xe_display_probe(struct xe_device *xe)
+{
+ int err;
+
+ if (!xe->info.probe_display)
+ goto no_display;
+
+ intel_display_device_probe(xe);
+
+ err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+ if (err)
+ return err;
+
+ if (has_display(xe))
+ return 0;
+
+no_display:
+ xe->info.probe_display = false;
+ unset_display_features(xe);
+ return 0;
+}
diff --git a/rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/preimage b/rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/preimage
new file mode 100644
index 000000000000..eaa3494d8d4d
--- /dev/null
+++ b/rr-cache/81579c8fec2fc49e618dd2283303b5d58dbd9033/preimage
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_display.h"
+#include "regs/xe_regs.h"
+
+#include <linux/fb.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "soc/intel_dram.h"
+#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
+#include "intel_acpi.h"
+#include "intel_audio.h"
+#include "intel_bw.h"
+#include "intel_display.h"
+#include "intel_display_driver.h"
+#include "intel_display_irq.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dp.h"
+#include "intel_encoder.h"
+#include "intel_fbdev.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "intel_opregion.h"
+#include "xe_module.h"
+
+/* Xe device functions */
+
+static bool has_display(struct xe_device *xe)
+{
+ return HAS_DISPLAY(xe);
+}
+
+/**
+ * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
+ * early on
+ * @pdev: PCI device
+ *
+ * Returns: true if probe needs to be deferred, false otherwise
+ */
+bool xe_display_driver_probe_defer(struct pci_dev *pdev)
+{
+ if (!xe_modparam.probe_display)
+ return 0;
+
+ return intel_display_driver_probe_defer(pdev);
+}
+
+/**
+ * xe_display_driver_set_hooks - Add driver flags and hooks for display
+ * @driver: DRM device driver
+ *
+ * Set features and function hooks in @driver that are needed for driving the
+ * display IP. This sets the driver's capability of driving display, regardless
+ * if the device has it enabled
+ */
+void xe_display_driver_set_hooks(struct drm_driver *driver)
+{
+ if (!xe_modparam.probe_display)
+ return;
+
+ driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
+}
+
+static void unset_display_features(struct xe_device *xe)
+{
+ xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+}
+
+static void display_destroy(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ destroy_workqueue(xe->display.hotplug.dp_wq);
+}
+
+/**
+ * xe_display_create - create display struct
+ * @xe: XE device instance
+ *
+ * Initialize all fields used by the display part.
+ *
+ * TODO: once everything can be inside a single struct, make the struct opaque
+ * to the rest of xe and return it to be xe->display.
+ *
+ * Returns: 0 on success
+ */
+int xe_display_create(struct xe_device *xe)
+{
+ spin_lock_init(&xe->display.fb_tracking.lock);
+
+ xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+
+ return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
+}
+
+static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_power_domains_cleanup(xe);
+}
+
+int xe_display_init_nommio(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return 0;
+
+ /* Fake uncore lock */
+ spin_lock_init(&xe->uncore.lock);
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(xe);
+
+ return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
+}
+
+static void xe_display_fini_noirq(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_remove_noirq(xe);
+ intel_opregion_cleanup(display);
+}
+
+int xe_display_init_noirq(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+ int err;
+
+ if (!xe->info.probe_display)
+ return 0;
+
+ intel_display_driver_early_probe(xe);
+
+ /* Early display init.. */
+ intel_opregion_setup(display);
+
+ /*
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
+ */
+ intel_dram_detect(xe);
+
+ intel_bw_init_hw(xe);
+
+ intel_display_device_info_runtime_init(xe);
+
+ err = intel_display_driver_probe_noirq(xe);
+ if (err) {
+ intel_opregion_cleanup(display);
+ return err;
+ }
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
+}
+
+static void xe_display_fini_noaccel(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_remove_nogem(xe);
+}
+
+int xe_display_init_noaccel(struct xe_device *xe)
+{
+ int err;
+
+ if (!xe->info.probe_display)
+ return 0;
+
+ err = intel_display_driver_probe_nogem(xe);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
+}
+
+int xe_display_init(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return 0;
+
+ return intel_display_driver_probe(xe);
+}
+
+void xe_display_fini(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_hpd_poll_fini(xe);
+
+ intel_hdcp_component_fini(xe);
+ intel_audio_deinit(xe);
+}
+
+void xe_display_register(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_register(xe);
+ intel_register_dsm_handler();
+ intel_power_domains_enable(xe);
+}
+
+void xe_display_unregister(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_unregister_dsm_handler();
+ intel_power_domains_disable(xe);
+ intel_display_driver_unregister(xe);
+}
+
+void xe_display_driver_remove(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_remove(xe);
+}
+
+/* IRQ-related functions */
+
+void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (master_ctl & DISPLAY_IRQ)
+ gen11_display_irq_handler(xe);
+}
+
+void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ if (gu_misc_iir & GU_MISC_GSE)
+ intel_opregion_asle_intr(display);
+}
+
+void xe_display_irq_reset(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ gen11_display_irq_reset(xe);
+}
+
+void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (gt->info.id == XE_GT0)
+ gen11_de_irq_postinstall(xe);
+}
+
+static bool suspend_to_idle(void)
+{
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ return true;
+#endif
+ return false;
+}
+
+static void xe_display_flush_cleanup_work(struct xe_device *xe)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&xe->drm, crtc) {
+ struct drm_crtc_commit *commit;
+
+ spin_lock(&crtc->base.commit_lock);
+ commit = list_first_entry_or_null(&crtc->base.commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->base.commit_lock);
+
+ if (commit) {
+ wait_for_completion(&commit->cleanup_done);
+ drm_crtc_commit_put(commit);
+ }
+ }
+}
+
+/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
+static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+{
+ struct intel_display *display = &xe->display;
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.probe_display)
+ return;
+
+ /*
+ * We do a lot of poking in a lot of registers, make sure they work
+ * properly.
+ */
+ intel_power_domains_disable(xe);
+ if (!runtime)
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+
+ if (!runtime && has_display(xe)) {
+ drm_kms_helper_poll_disable(&xe->drm);
+ intel_display_driver_disable_user_access(xe);
+ intel_display_driver_suspend(xe);
+ }
+
+ xe_display_flush_cleanup_work(xe);
+
+ intel_hpd_cancel_work(xe);
+
+ if (!runtime && has_display(xe)) {
+ intel_display_driver_suspend_access(xe);
+ intel_encoder_suspend_all(&xe->display);
+ }
+
+ intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
+
+ intel_dmc_suspend(display);
+}
+
+void xe_display_pm_suspend(struct xe_device *xe)
+{
+ __xe_display_pm_suspend(xe, false);
+}
+
+void xe_display_pm_shutdown(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_shutdown(xe);
+}
+
+void xe_display_pm_shutdown_noirq(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_shutdown_noirq(xe);
+}
+
+void xe_display_pm_shutdown_noaccel(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_driver_shutdown_nogem(xe);
+}
+
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed)
+ __xe_display_pm_suspend(xe, true);
+
+ intel_hpd_poll_enable(xe);
+}
+
+void xe_display_pm_suspend_late(struct xe_device *xe)
+{
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.probe_display)
+ return;
+
+ intel_power_domains_suspend(xe, s2idle);
+
+ intel_display_power_suspend_late(xe);
+}
+
+void xe_display_pm_resume_early(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_power_resume_early(xe);
+
+ intel_power_domains_resume(xe);
+}
+
+static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_dmc_resume(display);
+
+ if (has_display(xe))
+ drm_mode_config_reset(&xe->drm);
+
+ intel_display_driver_init_hw(xe);
+<<<<<<<
+=======
+
+ if (!runtime && has_display(xe))
+ intel_display_driver_resume_access(xe);
+
+ intel_hpd_init(xe);
+
+ if (!runtime && has_display(xe)) {
+ intel_display_driver_resume(xe);
+ drm_kms_helper_poll_enable(&xe->drm);
+ intel_display_driver_enable_user_access(xe);
+ intel_hpd_poll_disable(xe);
+ }
+
+ intel_opregion_resume(display);
+>>>>>>>
+
+ if (!runtime)
+ intel_display_driver_resume(xe);
+}
+
+void xe_display_pm_resume(struct xe_device *xe)
+{
+ __xe_display_pm_resume(xe, false);
+}
+
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_hpd_poll_disable(xe);
+
+ if (xe->d3cold.allowed)
+ __xe_display_pm_resume(xe, true);
+}
+
+
+static void display_device_remove(struct drm_device *dev, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ intel_display_device_remove(xe);
+}
+
+int xe_display_probe(struct xe_device *xe)
+{
+ int err;
+
+ if (!xe->info.probe_display)
+ goto no_display;
+
+ intel_display_device_probe(xe);
+
+ err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+ if (err)
+ return err;
+
+ if (has_display(xe))
+ return 0;
+
+no_display:
+ xe->info.probe_display = false;
+ unset_display_features(xe);
+ return 0;
+}
diff --git a/rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/postimage b/rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/postimage
deleted file mode 100644
index d607aa9c4ec2..000000000000
--- a/rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/postimage
+++ /dev/null
@@ -1,1395 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2014-2018 Broadcom
- * Copyright (C) 2023 Raspberry Pi
- */
-
-#include <drm/drm_syncobj.h>
-
-#include "v3d_drv.h"
-#include "v3d_regs.h"
-#include "v3d_trace.h"
-
-/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
- *
- * We don't lock the RCL the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
- * to v3d, so we don't attach dma-buf fences to them.
- */
-static int
-v3d_lock_bo_reservations(struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i, ret;
-
- ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < job->bo_count; i++) {
- ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_implicit_dependencies(&job->base,
- job->bo[i], true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
- return ret;
-}
-
-/**
- * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @job: V3D job being set up
- * @bo_handles: GEM handles
- * @bo_count: Number of GEM handles passed in
- *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
- */
-static int
-v3d_lookup_bos(struct drm_device *dev,
- struct drm_file *file_priv,
- struct v3d_job *job,
- u64 bo_handles,
- u32 bo_count)
-{
- job->bo_count = bo_count;
-
- if (!job->bo_count) {
- /* See comment on bo_index for why we have to check
- * this.
- */
- DRM_DEBUG("Rendering requires BOs\n");
- return -EINVAL;
- }
-
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count, &job->bo);
-}
-
-static void
-v3d_job_free(struct kref *ref)
-{
- struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- int i;
-
- if (job->bo) {
- for (i = 0; i < job->bo_count; i++)
- drm_gem_object_put(job->bo[i]);
- kvfree(job->bo);
- }
-
- dma_fence_put(job->irq_fence);
- dma_fence_put(job->done_fence);
-
- if (job->perfmon)
- v3d_perfmon_put(job->perfmon);
-
- kfree(job);
-}
-
-static void
-v3d_render_job_free(struct kref *ref)
-{
- struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
- base.refcount);
- struct v3d_bo *bo, *save;
-
- list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
- drm_gem_object_put(&bo->base.base);
- }
-
- v3d_job_free(ref);
-}
-
-void v3d_job_cleanup(struct v3d_job *job)
-{
- if (!job)
- return;
-
- drm_sched_job_cleanup(&job->base);
- v3d_job_put(job);
-}
-
-void v3d_job_put(struct v3d_job *job)
-{
- if (!job)
- return;
-
- kref_put(&job->refcount, job->free);
-}
-
-static int
-v3d_job_allocate(void **container, size_t size)
-{
- *container = kcalloc(1, size, GFP_KERNEL);
- if (!*container) {
- DRM_ERROR("Cannot allocate memory for V3D job.\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void
-v3d_job_deallocate(void **container)
-{
- kfree(*container);
- *container = NULL;
-}
-
-static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- struct v3d_job *job, void (*free)(struct kref *ref),
- u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int ret, i;
-
- job->v3d = v3d;
- job->free = free;
- job->file = file_priv;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
- if (ret)
- return ret;
-
- if (has_multisync) {
- if (se->in_sync_count && se->wait_stage == queue) {
- struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
-
- for (i = 0; i < se->in_sync_count; i++) {
- struct drm_v3d_sem in;
-
- if (copy_from_user(&in, handle++, sizeof(in))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy wait dep handle.\n");
- goto fail_deps;
- }
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
- }
- } else {
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
-
- kref_init(&job->refcount);
-
- return 0;
-
-fail_deps:
- drm_sched_job_cleanup(&job->base);
- return ret;
-}
-
-static void
-v3d_push_job(struct v3d_job *job)
-{
- drm_sched_job_arm(&job->base);
-
- job->done_fence = dma_fence_get(&job->base.s_fence->finished);
-
- /* put by scheduler job completion */
- kref_get(&job->refcount);
-
- drm_sched_entity_push_job(&job->base);
-}
-
-static void
-v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
- struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx,
- u32 out_sync,
- struct v3d_submit_ext *se,
- struct dma_fence *done_fence)
-{
- struct drm_syncobj *sync_out;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int i;
-
- for (i = 0; i < job->bo_count; i++) {
- /* XXX: Use shared fences for read-only objects. */
- dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
- DMA_RESV_USAGE_WRITE);
- }
-
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-
- /* Update the return sync object for the job */
- /* If it only supports a single signal semaphore*/
- if (!has_multisync) {
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
- }
- return;
- }
-
- /* If multiple semaphores extension is supported */
- if (se->out_sync_count) {
- for (i = 0; i < se->out_sync_count; i++) {
- drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
- done_fence);
- drm_syncobj_put(se->out_syncs[i].syncobj);
- }
- kvfree(se->out_syncs);
- }
-}
-
-static int
-v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
- struct v3d_dev *v3d,
- struct drm_v3d_submit_csd *args,
- struct v3d_csd_job **job,
- struct v3d_job **clean_job,
- struct v3d_submit_ext *se,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int ret;
-
- ret = v3d_job_allocate((void *)job, sizeof(**job));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, &(*job)->base,
- v3d_job_free, args->in_sync, se, V3D_CSD);
- if (ret) {
- v3d_job_deallocate((void *)job);
- return ret;
- }
-
- ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, *clean_job,
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret) {
- v3d_job_deallocate((void *)clean_job);
- return ret;
- }
-
- (*job)->args = *args;
-
- ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- return ret;
-
- return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
-}
-
-static void
-v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
-{
- unsigned int i;
-
- if (!(se && se->out_sync_count))
- return;
-
- for (i = 0; i < se->out_sync_count; i++)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-}
-
-static int
-v3d_get_multisync_post_deps(struct drm_file *file_priv,
- struct v3d_submit_ext *se,
- u32 count, u64 handles)
-{
- struct drm_v3d_sem __user *post_deps;
- int i, ret;
-
- if (!count)
- return 0;
-
- se->out_syncs = (struct v3d_submit_outsync *)
- kvmalloc_array(count,
- sizeof(struct v3d_submit_outsync),
- GFP_KERNEL);
- if (!se->out_syncs)
- return -ENOMEM;
-
- post_deps = u64_to_user_ptr(handles);
-
- for (i = 0; i < count; i++) {
- struct drm_v3d_sem out;
-
- if (copy_from_user(&out, post_deps++, sizeof(out))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy post dep handles\n");
- goto fail;
- }
-
- se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
- out.handle);
- if (!se->out_syncs[i].syncobj) {
- ret = -EINVAL;
- goto fail;
- }
- }
- se->out_sync_count = count;
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-
- return ret;
-}
-
-/* Get data for multiple binary semaphores synchronization. Parse syncobj
- * to be signaled when job completes (out_sync).
- */
-static int
-v3d_get_multisync_submit_deps(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_submit_ext *se)
-{
- struct drm_v3d_multi_sync multisync;
- int ret;
-
- if (se->in_sync_count || se->out_sync_count) {
- DRM_DEBUG("Two multisync extensions were added to the same job.");
- return -EINVAL;
- }
-
- if (copy_from_user(&multisync, ext, sizeof(multisync)))
- return -EFAULT;
-
- if (multisync.pad)
- return -EINVAL;
-
- ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
- multisync.out_syncs);
- if (ret)
- return ret;
-
- se->in_sync_count = multisync.in_sync_count;
- se->in_syncs = multisync.in_syncs;
- se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
- se->wait_stage = multisync.wait_stage;
-
- return 0;
-}
-
-/* Get data for the indirect CSD job submission. */
-static int
-v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct v3d_dev *v3d = v3d_priv->v3d;
- struct drm_v3d_indirect_csd indirect_csd;
- struct v3d_indirect_csd_info *info = &job->indirect_csd;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
- return -EFAULT;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
- return -EINVAL;
- }
-
- job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
- info->offset = indirect_csd.offset;
- info->wg_size = indirect_csd.wg_size;
- memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
- sizeof(indirect_csd.wg_uniform_offsets));
-
- info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
-
- return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
- &info->job, &info->clean_job,
- NULL, &info->acquire_ctx);
-}
-
-/* Get data for the query timestamp job submission. */
-static int
-v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- u32 __user *offsets, *syncs;
- struct drm_v3d_timestamp_query timestamp;
- struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
- unsigned int i;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
- return -EFAULT;
-
- if (timestamp.pad)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
-
- query_info->queries = kvmalloc_array(timestamp.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- offsets = u64_to_user_ptr(timestamp.offsets);
- syncs = u64_to_user_ptr(timestamp.syncs);
-
- for (i = 0; i < timestamp.count; i++) {
- u32 offset, sync;
-
- if (get_user(offset, offsets++)) {
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].offset = offset;
-
- if (get_user(sync, syncs++)) {
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
- sync);
- if (!query_info->queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- query_info->count = timestamp.count;
-
- return 0;
-
-error:
- v3d_timestamp_query_info_free(&job->timestamp_query, i);
- return err;
-}
-
-static int
-v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- u32 __user *syncs;
- struct drm_v3d_reset_timestamp_query reset;
- struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
- unsigned int i;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&reset, ext, sizeof(reset)))
- return -EFAULT;
-
- job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
-
- query_info->queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- syncs = u64_to_user_ptr(reset.syncs);
-
- for (i = 0; i < reset.count; i++) {
- u32 sync;
-
- query_info->queries[i].offset = reset.offset + 8 * i;
-
- if (get_user(sync, syncs++)) {
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
- sync);
- if (!query_info->queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- query_info->count = reset.count;
-
- return 0;
-
-error:
- v3d_timestamp_query_info_free(&job->timestamp_query, i);
- return err;
-}
-
-/* Get data for the copy timestamp query results job submission. */
-static int
-v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- u32 __user *offsets, *syncs;
- struct drm_v3d_copy_timestamp_query copy;
- struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
- unsigned int i;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&copy, ext, sizeof(copy)))
- return -EFAULT;
-
- if (copy.pad)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
-
- query_info->queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- offsets = u64_to_user_ptr(copy.offsets);
- syncs = u64_to_user_ptr(copy.syncs);
-
- for (i = 0; i < copy.count; i++) {
- u32 offset, sync;
-
- if (get_user(offset, offsets++)) {
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].offset = offset;
-
- if (get_user(sync, syncs++)) {
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
- sync);
- if (!query_info->queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- query_info->count = copy.count;
-
- job->copy.do_64bit = copy.do_64bit;
- job->copy.do_partial = copy.do_partial;
- job->copy.availability_bit = copy.availability_bit;
- job->copy.offset = copy.offset;
- job->copy.stride = copy.stride;
-
- return 0;
-
-error:
- v3d_timestamp_query_info_free(&job->timestamp_query, i);
- return err;
-}
-
-static int
-v3d_copy_query_info(struct v3d_performance_query_info *query_info,
- unsigned int count,
- unsigned int nperfmons,
- u32 __user *syncs,
- u64 __user *kperfmon_ids,
- struct drm_file *file_priv)
-{
- unsigned int i, j;
- int err;
-
- for (i = 0; i < count; i++) {
- struct v3d_performance_query *query = &query_info->queries[i];
- u32 __user *ids_pointer;
- u32 sync, id;
- u64 ids;
-
- if (get_user(sync, syncs++)) {
- err = -EFAULT;
- goto error;
- }
-
- if (get_user(ids, kperfmon_ids++)) {
- err = -EFAULT;
- goto error;
- }
-
- query->kperfmon_ids =
- kvmalloc_array(nperfmons,
- sizeof(struct v3d_performance_query *),
- GFP_KERNEL);
- if (!query->kperfmon_ids) {
- err = -ENOMEM;
- goto error;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (j = 0; j < nperfmons; j++) {
- if (get_user(id, ids_pointer++)) {
- kvfree(query->kperfmon_ids);
- err = -EFAULT;
- goto error;
- }
-
- query->kperfmon_ids[j] = id;
- }
-
- query->syncobj = drm_syncobj_find(file_priv, sync);
- if (!query->syncobj) {
- kvfree(query->kperfmon_ids);
- err = -ENOENT;
- goto error;
- }
- }
-
- return 0;
-
-error:
- v3d_performance_query_info_free(query_info, i);
- return err;
-}
-
-static int
-v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- struct v3d_performance_query_info *query_info = &job->performance_query;
- struct drm_v3d_reset_performance_query reset;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&reset, ext, sizeof(reset)))
- return -EFAULT;
-
- job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
-
- query_info->queries =
- kvmalloc_array(reset.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- err = v3d_copy_query_info(query_info,
- reset.count,
- reset.nperfmons,
- u64_to_user_ptr(reset.syncs),
- u64_to_user_ptr(reset.kperfmon_ids),
- file_priv);
- if (err)
- return err;
-
- query_info->count = reset.count;
- query_info->nperfmons = reset.nperfmons;
-
- return 0;
-}
-
-static int
-v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- struct v3d_performance_query_info *query_info = &job->performance_query;
- struct drm_v3d_copy_performance_query copy;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&copy, ext, sizeof(copy)))
- return -EFAULT;
-
- if (copy.pad)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
-
- query_info->queries =
- kvmalloc_array(copy.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- err = v3d_copy_query_info(query_info,
- copy.count,
- copy.nperfmons,
- u64_to_user_ptr(copy.syncs),
- u64_to_user_ptr(copy.kperfmon_ids),
- file_priv);
- if (err)
- return err;
-
- query_info->count = copy.count;
- query_info->nperfmons = copy.nperfmons;
- query_info->ncounters = copy.ncounters;
-
- job->copy.do_64bit = copy.do_64bit;
- job->copy.do_partial = copy.do_partial;
- job->copy.availability_bit = copy.availability_bit;
- job->copy.offset = copy.offset;
- job->copy.stride = copy.stride;
-
- return 0;
-}
-
-/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
- * according to the extension id (name).
- */
-static int
-v3d_get_extensions(struct drm_file *file_priv,
- u64 ext_handles,
- struct v3d_submit_ext *se,
- struct v3d_cpu_job *job)
-{
- struct drm_v3d_extension __user *user_ext;
- int ret;
-
- user_ext = u64_to_user_ptr(ext_handles);
- while (user_ext) {
- struct drm_v3d_extension ext;
-
- if (copy_from_user(&ext, user_ext, sizeof(ext))) {
- DRM_DEBUG("Failed to copy submit extension\n");
- return -EFAULT;
- }
-
- switch (ext.id) {
- case DRM_V3D_EXT_ID_MULTI_SYNC:
- ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
- break;
- case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
- ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
- ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
- ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
- ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
- ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
- ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
- break;
- default:
- DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
- return -EINVAL;
- }
-
- if (ret)
- return ret;
-
- user_ext = u64_to_user_ptr(ext.next);
- }
-
- return 0;
-}
-
-/**
- * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * This is the main entrypoint for userspace to submit a 3D frame to
- * the GPU. Userspace provides the binner command list (if
- * applicable), and the kernel sets up the render command list to draw
- * to the framebuffer described in the ioctl, using the command lists
- * that the 3D engine's binner will produce.
- */
-int
-v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_cl *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render = NULL;
- struct v3d_job *clean_job = NULL;
- struct v3d_job *last_job;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
-
- if (args->pad)
- return -EINVAL;
-
- if (args->flags &&
- args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
- DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_allocate((void *)&render, sizeof(*render));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, &render->base,
- v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret) {
- v3d_job_deallocate((void *)&render);
- goto fail;
- }
-
- render->start = args->rcl_start;
- render->end = args->rcl_end;
- INIT_LIST_HEAD(&render->unref_list);
-
- if (args->bcl_start != args->bcl_end) {
- ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, &bin->base,
- v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret) {
- v3d_job_deallocate((void *)&bin);
- goto fail;
- }
-
- bin->start = args->bcl_start;
- bin->end = args->bcl_end;
- bin->qma = args->qma;
- bin->qms = args->qms;
- bin->qts = args->qts;
- bin->render = render;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, clean_job,
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret) {
- v3d_job_deallocate((void *)&clean_job);
- goto fail;
- }
-
- last_job = clean_job;
- } else {
- last_job = &render->base;
- }
-
- ret = v3d_lookup_bos(dev, file_priv, last_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- render->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
-
- if (!render->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- if (bin) {
- bin->base.perfmon = render->base.perfmon;
- v3d_perfmon_get(bin->base.perfmon);
- v3d_push_job(&bin->base);
-
- ret = drm_sched_job_add_dependency(&render->base.base,
- dma_fence_get(bin->base.done_fence));
- if (ret)
- goto fail_unreserve;
- }
-
- v3d_push_job(&render->base);
-
- if (clean_job) {
- struct dma_fence *render_fence =
- dma_fence_get(render->base.done_fence);
- ret = drm_sched_job_add_dependency(&clean_job->base,
- render_fence);
- if (ret)
- goto fail_unreserve;
- clean_job->perfmon = render->base.perfmon;
- v3d_perfmon_get(clean_job->perfmon);
- v3d_push_job(clean_job);
- }
-
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- last_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- last_job->done_fence);
-
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(last_job->bo,
- last_job->bo_count, &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)bin);
- v3d_job_cleanup((void *)render);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the TFU, which we don't
- * need to validate since the TFU is behind the MMU.
- */
-int
-v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_tfu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_tfu_job *job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_DEBUG("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_allocate((void *)&job, sizeof(*job));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret) {
- v3d_job_deallocate((void *)&job);
- goto fail;
- }
-
- job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
- sizeof(*job->base.bo), GFP_KERNEL);
- if (!job->base.bo) {
- ret = -ENOMEM;
- goto fail;
- }
-
- job->args = *args;
-
- for (job->base.bo_count = 0;
- job->base.bo_count < ARRAY_SIZE(args->bo_handles);
- job->base.bo_count++) {
- struct drm_gem_object *bo;
-
- if (!args->bo_handles[job->base.bo_count])
- break;
-
- bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- job->base.bo_count,
- args->bo_handles[job->base.bo_count]);
- ret = -ENOENT;
- goto fail;
- }
- job->base.bo[job->base.bo_count] = bo;
- }
-
- ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
- if (ret)
- goto fail;
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &job->base, &acquire_ctx,
- args->out_sync,
- &se,
- job->base.done_fence);
-
- v3d_job_put(&job->base);
-
- return 0;
-
-fail:
- v3d_job_cleanup((void *)job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the CSD, which we don't
- * need to validate since the CSD is behind the MMU.
- */
-int
-v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_csd *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_csd_job *job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
-
- if (args->pad)
- return -EINVAL;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
- return -EINVAL;
- }
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
- &job, &clean_job, &se,
- &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- job->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
- if (!job->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- clean_job->done_fence);
-
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-static const unsigned int cpu_job_bo_handle_count[] = {
- [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
- [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
- [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
- [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
- [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
- [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
-};
-
-/**
- * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace specifies the CPU job type and data required to perform its
- * operations through the drm_v3d_extension struct.
- */
-int
-v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_cpu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_submit_ext *out_se = NULL;
- struct v3d_cpu_job *cpu_job = NULL;
- struct v3d_csd_job *csd_job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("Invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
- if (ret)
- return ret;
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- goto fail;
- }
- }
-
- /* Every CPU job must have a CPU job user extension */
- if (!cpu_job->job_type) {
- DRM_DEBUG("CPU job must have a CPU job user extension.\n");
- ret = -EINVAL;
- goto fail;
- }
-
- if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
- DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
- ret = -EINVAL;
- goto fail;
- }
-
- trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
-
- ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
- v3d_job_free, 0, &se, V3D_CPU);
- if (ret) {
- v3d_job_deallocate((void *)&cpu_job);
- goto fail;
- }
-
- clean_job = cpu_job->indirect_csd.clean_job;
- csd_job = cpu_job->indirect_csd.job;
-
- if (args->bo_handle_count) {
- ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
- if (ret)
- goto fail;
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&cpu_job->base);
-
- switch (cpu_job->job_type) {
- case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
- ret = drm_sched_job_add_dependency(&csd_job->base.base,
- dma_fence_get(cpu_job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(&csd_job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(csd_job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
-
- break;
- default:
- break;
- }
- mutex_unlock(&v3d->sched_lock);
-
- out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &cpu_job->base,
- &acquire_ctx, 0,
- out_se, cpu_job->base.done_fence);
-
- switch (cpu_job->job_type) {
- case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &cpu_job->indirect_csd.acquire_ctx,
- 0, &se, clean_job->done_fence);
- break;
- default:
- break;
- }
-
- v3d_job_put(&cpu_job->base);
- v3d_job_put(&csd_job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-
- drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
- &acquire_ctx);
-
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &cpu_job->indirect_csd.acquire_ctx);
-
-fail:
- v3d_job_cleanup((void *)cpu_job);
- v3d_job_cleanup((void *)csd_job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
- kvfree(cpu_job->timestamp_query.queries);
- kvfree(cpu_job->performance_query.queries);
-
- return ret;
-}
diff --git a/rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/preimage b/rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/preimage
deleted file mode 100644
index 17370d99956c..000000000000
--- a/rr-cache/9e63314e5ad6c77e0963bc61cc788b8f74a9860b/preimage
+++ /dev/null
@@ -1,1547 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2014-2018 Broadcom
- * Copyright (C) 2023 Raspberry Pi
- */
-
-#include <drm/drm_syncobj.h>
-
-#include "v3d_drv.h"
-#include "v3d_regs.h"
-#include "v3d_trace.h"
-
-/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
- *
- * We don't lock the RCL the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
- * to v3d, so we don't attach dma-buf fences to them.
- */
-static int
-v3d_lock_bo_reservations(struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i, ret;
-
- ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < job->bo_count; i++) {
- ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_implicit_dependencies(&job->base,
- job->bo[i], true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
- return ret;
-}
-
-/**
- * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @job: V3D job being set up
- * @bo_handles: GEM handles
- * @bo_count: Number of GEM handles passed in
- *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
- */
-static int
-v3d_lookup_bos(struct drm_device *dev,
- struct drm_file *file_priv,
- struct v3d_job *job,
- u64 bo_handles,
- u32 bo_count)
-{
- job->bo_count = bo_count;
-
- if (!job->bo_count) {
- /* See comment on bo_index for why we have to check
- * this.
- */
- DRM_DEBUG("Rendering requires BOs\n");
- return -EINVAL;
- }
-
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count, &job->bo);
-}
-
-static void
-v3d_job_free(struct kref *ref)
-{
- struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- int i;
-
- if (job->bo) {
- for (i = 0; i < job->bo_count; i++)
- drm_gem_object_put(job->bo[i]);
- kvfree(job->bo);
- }
-
- dma_fence_put(job->irq_fence);
- dma_fence_put(job->done_fence);
-
- if (job->perfmon)
- v3d_perfmon_put(job->perfmon);
-
- kfree(job);
-}
-
-static void
-v3d_render_job_free(struct kref *ref)
-{
- struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
- base.refcount);
- struct v3d_bo *bo, *save;
-
- list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
- drm_gem_object_put(&bo->base.base);
- }
-
- v3d_job_free(ref);
-}
-
-void v3d_job_cleanup(struct v3d_job *job)
-{
- if (!job)
- return;
-
- drm_sched_job_cleanup(&job->base);
- v3d_job_put(job);
-}
-
-void v3d_job_put(struct v3d_job *job)
-{
- if (!job)
- return;
-
- kref_put(&job->refcount, job->free);
-}
-
-static int
-v3d_job_allocate(void **container, size_t size)
-{
- *container = kcalloc(1, size, GFP_KERNEL);
- if (!*container) {
- DRM_ERROR("Cannot allocate memory for V3D job.\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void
-v3d_job_deallocate(void **container)
-{
- kfree(*container);
- *container = NULL;
-}
-
-static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- struct v3d_job *job, void (*free)(struct kref *ref),
- u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int ret, i;
-
- job->v3d = v3d;
- job->free = free;
- job->file = file_priv;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
- if (ret)
- return ret;
-
- if (has_multisync) {
- if (se->in_sync_count && se->wait_stage == queue) {
- struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
-
- for (i = 0; i < se->in_sync_count; i++) {
- struct drm_v3d_sem in;
-
- if (copy_from_user(&in, handle++, sizeof(in))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy wait dep handle.\n");
- goto fail_deps;
- }
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
-
-				/* TODO: Investigate why this was filtered out for the IOCTL. */
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
- }
- } else {
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
-
-		/* TODO: Investigate why this was filtered out for the IOCTL. */
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
-
- kref_init(&job->refcount);
-
- return 0;
-
-fail_deps:
- drm_sched_job_cleanup(&job->base);
- return ret;
-}
-
-static void
-v3d_push_job(struct v3d_job *job)
-{
- drm_sched_job_arm(&job->base);
-
- job->done_fence = dma_fence_get(&job->base.s_fence->finished);
-
- /* put by scheduler job completion */
- kref_get(&job->refcount);
-
- drm_sched_entity_push_job(&job->base);
-}
-
-static void
-v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
- struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx,
- u32 out_sync,
- struct v3d_submit_ext *se,
- struct dma_fence *done_fence)
-{
- struct drm_syncobj *sync_out;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int i;
-
- for (i = 0; i < job->bo_count; i++) {
- /* XXX: Use shared fences for read-only objects. */
- dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
- DMA_RESV_USAGE_WRITE);
- }
-
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-
- /* Update the return sync object for the job */
-	/* If the job only supports a single signal semaphore */
- if (!has_multisync) {
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
- }
- return;
- }
-
- /* If multiple semaphores extension is supported */
- if (se->out_sync_count) {
- for (i = 0; i < se->out_sync_count; i++) {
- drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
- done_fence);
- drm_syncobj_put(se->out_syncs[i].syncobj);
- }
- kvfree(se->out_syncs);
- }
-}
-
-static int
-v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
- struct v3d_dev *v3d,
- struct drm_v3d_submit_csd *args,
- struct v3d_csd_job **job,
- struct v3d_job **clean_job,
- struct v3d_submit_ext *se,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int ret;
-
- ret = v3d_job_allocate((void *)job, sizeof(**job));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, &(*job)->base,
- v3d_job_free, args->in_sync, se, V3D_CSD);
- if (ret) {
- v3d_job_deallocate((void *)job);
- return ret;
- }
-
- ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, *clean_job,
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret) {
- v3d_job_deallocate((void *)clean_job);
- return ret;
- }
-
- (*job)->args = *args;
-
- ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- return ret;
-
- return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
-}
-
-static void
-v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
-{
- unsigned int i;
-
- if (!(se && se->out_sync_count))
- return;
-
- for (i = 0; i < se->out_sync_count; i++)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-}
-
-static int
-v3d_get_multisync_post_deps(struct drm_file *file_priv,
- struct v3d_submit_ext *se,
- u32 count, u64 handles)
-{
- struct drm_v3d_sem __user *post_deps;
- int i, ret;
-
- if (!count)
- return 0;
-
- se->out_syncs = (struct v3d_submit_outsync *)
- kvmalloc_array(count,
- sizeof(struct v3d_submit_outsync),
- GFP_KERNEL);
- if (!se->out_syncs)
- return -ENOMEM;
-
- post_deps = u64_to_user_ptr(handles);
-
- for (i = 0; i < count; i++) {
- struct drm_v3d_sem out;
-
- if (copy_from_user(&out, post_deps++, sizeof(out))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy post dep handles\n");
- goto fail;
- }
-
- se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
- out.handle);
- if (!se->out_syncs[i].syncobj) {
- ret = -EINVAL;
- goto fail;
- }
- }
- se->out_sync_count = count;
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-
- return ret;
-}
-
-/* Get data for multiple binary semaphore synchronization. Parse the syncobjs
- * to be signaled when the job completes (out_sync).
- */
-static int
-v3d_get_multisync_submit_deps(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_submit_ext *se)
-{
- struct drm_v3d_multi_sync multisync;
- int ret;
-
- if (se->in_sync_count || se->out_sync_count) {
-		DRM_DEBUG("Two multisync extensions were added to the same job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&multisync, ext, sizeof(multisync)))
- return -EFAULT;
-
- if (multisync.pad)
- return -EINVAL;
-
- ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
- multisync.out_syncs);
- if (ret)
- return ret;
-
- se->in_sync_count = multisync.in_sync_count;
- se->in_syncs = multisync.in_syncs;
- se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
- se->wait_stage = multisync.wait_stage;
-
- return 0;
-}
-
-/* Get data for the indirect CSD job submission. */
-static int
-v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct v3d_dev *v3d = v3d_priv->v3d;
- struct drm_v3d_indirect_csd indirect_csd;
- struct v3d_indirect_csd_info *info = &job->indirect_csd;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
- return -EFAULT;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
- return -EINVAL;
- }
-
- job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
- info->offset = indirect_csd.offset;
- info->wg_size = indirect_csd.wg_size;
- memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
- sizeof(indirect_csd.wg_uniform_offsets));
-
- info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
-
- return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
- &info->job, &info->clean_job,
- NULL, &info->acquire_ctx);
-}
-
-/* Get data for the query timestamp job submission. */
-static int
-v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- u32 __user *offsets, *syncs;
- struct drm_v3d_timestamp_query timestamp;
-<<<<<<<
-=======
- struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
->>>>>>>
- unsigned int i;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
- return -EFAULT;
-
- if (timestamp.pad)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
-
- query_info->queries = kvmalloc_array(timestamp.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- offsets = u64_to_user_ptr(timestamp.offsets);
- syncs = u64_to_user_ptr(timestamp.syncs);
-
- for (i = 0; i < timestamp.count; i++) {
- u32 offset, sync;
-
-<<<<<<<
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
-=======
- if (get_user(offset, offsets++)) {
->>>>>>>
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].offset = offset;
-
-<<<<<<<
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-=======
- if (get_user(sync, syncs++)) {
->>>>>>>
- err = -EFAULT;
- goto error;
- }
-
-<<<<<<<
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
-=======
- query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
- sync);
- if (!query_info->queries[i].syncobj) {
->>>>>>>
- err = -ENOENT;
- goto error;
- }
- }
- query_info->count = timestamp.count;
-
- return 0;
-
-error:
- v3d_timestamp_query_info_free(&job->timestamp_query, i);
- return err;
-}
-
-static int
-v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- u32 __user *syncs;
- struct drm_v3d_reset_timestamp_query reset;
-<<<<<<<
-=======
- struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
->>>>>>>
- unsigned int i;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&reset, ext, sizeof(reset)))
- return -EFAULT;
-
- job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
-
- query_info->queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- syncs = u64_to_user_ptr(reset.syncs);
-
- for (i = 0; i < reset.count; i++) {
- u32 sync;
-
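-		/* Queries are laid out back to back at reset.offset, 8 bytes per query. */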
- query_info->queries[i].offset = reset.offset + 8 * i;
-
-<<<<<<<
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-=======
- if (get_user(sync, syncs++)) {
->>>>>>>
- err = -EFAULT;
- goto error;
- }
-
-<<<<<<<
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
-=======
- query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
- sync);
- if (!query_info->queries[i].syncobj) {
->>>>>>>
- err = -ENOENT;
- goto error;
- }
- }
- query_info->count = reset.count;
-
- return 0;
-
-error:
- v3d_timestamp_query_info_free(&job->timestamp_query, i);
- return err;
-}
-
-/* Get data for the copy timestamp query results job submission. */
-static int
-v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- u32 __user *offsets, *syncs;
- struct drm_v3d_copy_timestamp_query copy;
-<<<<<<<
-=======
- struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
->>>>>>>
- unsigned int i;
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&copy, ext, sizeof(copy)))
- return -EFAULT;
-
- if (copy.pad)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
-
- query_info->queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
- offsets = u64_to_user_ptr(copy.offsets);
- syncs = u64_to_user_ptr(copy.syncs);
-
- for (i = 0; i < copy.count; i++) {
- u32 offset, sync;
-
-<<<<<<<
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
-=======
- if (get_user(offset, offsets++)) {
->>>>>>>
- err = -EFAULT;
- goto error;
- }
-
- query_info->queries[i].offset = offset;
-
-<<<<<<<
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-=======
- if (get_user(sync, syncs++)) {
->>>>>>>
- err = -EFAULT;
- goto error;
- }
-
-<<<<<<<
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
-=======
- query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
- sync);
- if (!query_info->queries[i].syncobj) {
->>>>>>>
- err = -ENOENT;
- goto error;
- }
- }
- query_info->count = copy.count;
-
- job->copy.do_64bit = copy.do_64bit;
- job->copy.do_partial = copy.do_partial;
- job->copy.availability_bit = copy.availability_bit;
- job->copy.offset = copy.offset;
- job->copy.stride = copy.stride;
-
- return 0;
-
-error:
- v3d_timestamp_query_info_free(&job->timestamp_query, i);
- return err;
-<<<<<<<
-=======
-}
-
-static int
-v3d_copy_query_info(struct v3d_performance_query_info *query_info,
- unsigned int count,
- unsigned int nperfmons,
- u32 __user *syncs,
- u64 __user *kperfmon_ids,
- struct drm_file *file_priv)
-{
- unsigned int i, j;
- int err;
-
- for (i = 0; i < count; i++) {
- struct v3d_performance_query *query = &query_info->queries[i];
- u32 __user *ids_pointer;
- u32 sync, id;
- u64 ids;
-
- if (get_user(sync, syncs++)) {
- err = -EFAULT;
- goto error;
- }
-
- if (get_user(ids, kperfmon_ids++)) {
- err = -EFAULT;
- goto error;
- }
-
- query->kperfmon_ids =
- kvmalloc_array(nperfmons,
- sizeof(struct v3d_performance_query *),
- GFP_KERNEL);
- if (!query->kperfmon_ids) {
- err = -ENOMEM;
- goto error;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (j = 0; j < nperfmons; j++) {
- if (get_user(id, ids_pointer++)) {
- kvfree(query->kperfmon_ids);
- err = -EFAULT;
- goto error;
- }
-
- query->kperfmon_ids[j] = id;
- }
-
- query->syncobj = drm_syncobj_find(file_priv, sync);
- if (!query->syncobj) {
- kvfree(query->kperfmon_ids);
- err = -ENOENT;
- goto error;
- }
- }
-
- return 0;
-
-error:
- v3d_performance_query_info_free(query_info, i);
- return err;
->>>>>>>
-}
-
-static int
-v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- struct v3d_performance_query_info *query_info = &job->performance_query;
- struct drm_v3d_reset_performance_query reset;
-<<<<<<<
-=======
- unsigned int i, j;
->>>>>>>
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&reset, ext, sizeof(reset)))
- return -EFAULT;
-
- if (reset.nperfmons > V3D_MAX_PERFMONS)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
-
- query_info->queries =
- kvmalloc_array(reset.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
-<<<<<<<
- err = v3d_copy_query_info(query_info,
- reset.count,
- reset.nperfmons,
- u64_to_user_ptr(reset.syncs),
- u64_to_user_ptr(reset.kperfmon_ids),
- file_priv);
- if (err)
- return err;
-
- query_info->count = reset.count;
- query_info->nperfmons = reset.nperfmons;
-=======
- syncs = u64_to_user_ptr(reset.syncs);
- kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
-
- for (i = 0; i < reset.count; i++) {
- u32 sync;
- u64 ids;
- u32 __user *ids_pointer;
- u32 id;
-
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- err = -EFAULT;
- goto error;
- }
-
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- err = -EFAULT;
- goto error;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (j = 0; j < reset.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- err = -EFAULT;
- goto error;
- }
-
- job->performance_query.queries[i].kperfmon_ids[j] = id;
- }
-
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->performance_query.queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- job->performance_query.count = reset.count;
- job->performance_query.nperfmons = reset.nperfmons;
->>>>>>>
-
- return 0;
-
-error:
- v3d_performance_query_info_free(&job->performance_query, i);
- return err;
-}
-
-static int
-v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
-{
- struct v3d_performance_query_info *query_info = &job->performance_query;
- struct drm_v3d_copy_performance_query copy;
-<<<<<<<
-=======
- unsigned int i, j;
->>>>>>>
- int err;
-
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&copy, ext, sizeof(copy)))
- return -EFAULT;
-
- if (copy.pad)
- return -EINVAL;
-
- if (copy.nperfmons > V3D_MAX_PERFMONS)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
-
- query_info->queries =
- kvmalloc_array(copy.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!query_info->queries)
- return -ENOMEM;
-
-<<<<<<<
- err = v3d_copy_query_info(query_info,
- copy.count,
- copy.nperfmons,
- u64_to_user_ptr(copy.syncs),
- u64_to_user_ptr(copy.kperfmon_ids),
- file_priv);
- if (err)
- return err;
-
- query_info->count = copy.count;
- query_info->nperfmons = copy.nperfmons;
- query_info->ncounters = copy.ncounters;
-=======
- syncs = u64_to_user_ptr(copy.syncs);
- kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
-
- for (i = 0; i < copy.count; i++) {
- u32 sync;
- u64 ids;
- u32 __user *ids_pointer;
- u32 id;
-
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- err = -EFAULT;
- goto error;
- }
-
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- err = -EFAULT;
- goto error;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (j = 0; j < copy.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- err = -EFAULT;
- goto error;
- }
-
- job->performance_query.queries[i].kperfmon_ids[j] = id;
- }
-
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->performance_query.queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- job->performance_query.count = copy.count;
- job->performance_query.nperfmons = copy.nperfmons;
- job->performance_query.ncounters = copy.ncounters;
->>>>>>>
-
- job->copy.do_64bit = copy.do_64bit;
- job->copy.do_partial = copy.do_partial;
- job->copy.availability_bit = copy.availability_bit;
- job->copy.offset = copy.offset;
- job->copy.stride = copy.stride;
-
- return 0;
-
-error:
- v3d_performance_query_info_free(&job->performance_query, i);
- return err;
-}
-
-/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
- * according to the extension id (name).
- */
-static int
-v3d_get_extensions(struct drm_file *file_priv,
- u64 ext_handles,
- struct v3d_submit_ext *se,
- struct v3d_cpu_job *job)
-{
- struct drm_v3d_extension __user *user_ext;
- int ret;
-
- user_ext = u64_to_user_ptr(ext_handles);
- while (user_ext) {
- struct drm_v3d_extension ext;
-
- if (copy_from_user(&ext, user_ext, sizeof(ext))) {
- DRM_DEBUG("Failed to copy submit extension\n");
- return -EFAULT;
- }
-
- switch (ext.id) {
- case DRM_V3D_EXT_ID_MULTI_SYNC:
- ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
- break;
- case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
- ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
- ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
- ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
- ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
- ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
- break;
- case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
- ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
- break;
- default:
- DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
- return -EINVAL;
- }
-
- if (ret)
- return ret;
-
- user_ext = u64_to_user_ptr(ext.next);
- }
-
- return 0;
-}
-
-/**
- * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * This is the main entrypoint for userspace to submit a 3D frame to
- * the GPU. Userspace provides the binner command list (if
- * applicable), and the kernel sets up the render command list to draw
- * to the framebuffer described in the ioctl, using the command lists
- * that the 3D engine's binner will produce.
- */
-int
-v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_cl *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render = NULL;
- struct v3d_job *clean_job = NULL;
- struct v3d_job *last_job;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
-
- if (args->pad)
- return -EINVAL;
-
- if (args->flags &&
- args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
- DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_allocate((void *)&render, sizeof(*render));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, &render->base,
- v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret) {
- v3d_job_deallocate((void *)&render);
- goto fail;
- }
-
- render->start = args->rcl_start;
- render->end = args->rcl_end;
- INIT_LIST_HEAD(&render->unref_list);
-
- if (args->bcl_start != args->bcl_end) {
- ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, &bin->base,
- v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret) {
- v3d_job_deallocate((void *)&bin);
- goto fail;
- }
-
- bin->start = args->bcl_start;
- bin->end = args->bcl_end;
- bin->qma = args->qma;
- bin->qms = args->qms;
- bin->qts = args->qts;
- bin->render = render;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, clean_job,
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret) {
- v3d_job_deallocate((void *)&clean_job);
- goto fail;
- }
-
- last_job = clean_job;
- } else {
- last_job = &render->base;
- }
-
- ret = v3d_lookup_bos(dev, file_priv, last_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- render->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
-
- if (!render->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- if (bin) {
- bin->base.perfmon = render->base.perfmon;
- v3d_perfmon_get(bin->base.perfmon);
- v3d_push_job(&bin->base);
-
- ret = drm_sched_job_add_dependency(&render->base.base,
- dma_fence_get(bin->base.done_fence));
- if (ret)
- goto fail_unreserve;
- }
-
- v3d_push_job(&render->base);
-
- if (clean_job) {
- struct dma_fence *render_fence =
- dma_fence_get(render->base.done_fence);
- ret = drm_sched_job_add_dependency(&clean_job->base,
- render_fence);
- if (ret)
- goto fail_unreserve;
- clean_job->perfmon = render->base.perfmon;
- v3d_perfmon_get(clean_job->perfmon);
- v3d_push_job(clean_job);
- }
-
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- last_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- last_job->done_fence);
-
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(last_job->bo,
- last_job->bo_count, &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)bin);
- v3d_job_cleanup((void *)render);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the TFU, which we don't
- * need to validate since the TFU is behind the MMU.
- */
-int
-v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_tfu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_tfu_job *job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_DEBUG("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_allocate((void *)&job, sizeof(*job));
- if (ret)
- return ret;
-
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret) {
- v3d_job_deallocate((void *)&job);
- goto fail;
- }
-
- job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
- sizeof(*job->base.bo), GFP_KERNEL);
- if (!job->base.bo) {
- ret = -ENOMEM;
- goto fail;
- }
-
- job->args = *args;
-
- for (job->base.bo_count = 0;
- job->base.bo_count < ARRAY_SIZE(args->bo_handles);
- job->base.bo_count++) {
- struct drm_gem_object *bo;
-
- if (!args->bo_handles[job->base.bo_count])
- break;
-
- bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- job->base.bo_count,
- args->bo_handles[job->base.bo_count]);
- ret = -ENOENT;
- goto fail;
- }
- job->base.bo[job->base.bo_count] = bo;
- }
-
- ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
- if (ret)
- goto fail;
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &job->base, &acquire_ctx,
- args->out_sync,
- &se,
- job->base.done_fence);
-
- v3d_job_put(&job->base);
-
- return 0;
-
-fail:
- v3d_job_cleanup((void *)job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the CSD, which we don't
- * need to validate since the CSD is behind the MMU.
- */
-int
-v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_csd *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_csd_job *job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
-
- if (args->pad)
- return -EINVAL;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
- return -EINVAL;
- }
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
- &job, &clean_job, &se,
- &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- job->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
- if (!job->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- clean_job->done_fence);
-
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
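-/*
- * Number of BOs userspace is expected to pass for each CPU job type,
- * enforced against args->bo_handle_count in v3d_submit_cpu_ioctl().
- */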
-static const unsigned int cpu_job_bo_handle_count[] = {
- [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
- [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
- [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
- [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
- [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
- [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
-};
-
-/**
- * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace specifies the CPU job type and data required to perform its
- * operations through the drm_v3d_extension struct.
- */
-int
-v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_cpu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_submit_ext *out_se = NULL;
- struct v3d_cpu_job *cpu_job = NULL;
- struct v3d_csd_job *csd_job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("Invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
- if (ret)
- return ret;
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- goto fail;
- }
- }
-
- /* Every CPU job must have a CPU job user extension */
- if (!cpu_job->job_type) {
- DRM_DEBUG("CPU job must have a CPU job user extension.\n");
- ret = -EINVAL;
- goto fail;
- }
-
- if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
- DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
- ret = -EINVAL;
- goto fail;
- }
-
- trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
-
- ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
- v3d_job_free, 0, &se, V3D_CPU);
- if (ret) {
- v3d_job_deallocate((void *)&cpu_job);
- goto fail;
- }
-
- clean_job = cpu_job->indirect_csd.clean_job;
- csd_job = cpu_job->indirect_csd.job;
-
- if (args->bo_handle_count) {
- ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
- if (ret)
- goto fail;
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&cpu_job->base);
-
- switch (cpu_job->job_type) {
- case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
- ret = drm_sched_job_add_dependency(&csd_job->base.base,
- dma_fence_get(cpu_job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(&csd_job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(csd_job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
-
- break;
- default:
- break;
- }
- mutex_unlock(&v3d->sched_lock);
-
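-	/*
-	 * For indirect CSD jobs, the user out-syncs are attached to the
-	 * clean job below instead of to the CPU job itself.
-	 */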
- out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &cpu_job->base,
- &acquire_ctx, 0,
- out_se, cpu_job->base.done_fence);
-
- switch (cpu_job->job_type) {
- case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &cpu_job->indirect_csd.acquire_ctx,
- 0, &se, clean_job->done_fence);
- break;
- default:
- break;
- }
-
- v3d_job_put(&cpu_job->base);
- v3d_job_put(&csd_job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-
- drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
- &acquire_ctx);
-
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &cpu_job->indirect_csd.acquire_ctx);
-
-fail:
- v3d_job_cleanup((void *)cpu_job);
- v3d_job_cleanup((void *)csd_job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
- kvfree(cpu_job->timestamp_query.queries);
- kvfree(cpu_job->performance_query.queries);
-
- return ret;
-}
diff --git a/rr-cache/c3af3f3a26e05878b3dc9cf670c390ed4180ae9f/preimage b/rr-cache/c3af3f3a26e05878b3dc9cf670c390ed4180ae9f/preimage
new file mode 100644
index 000000000000..cc2cfd343e11
--- /dev/null
+++ b/rr-cache/c3af3f3a26e05878b3dc9cf670c390ed4180ae9f/preimage
@@ -0,0 +1,1766 @@
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/oom.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/vt.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "display/i9xx_display_sr.h"
+#include "display/intel_acpi.h"
+#include "display/intel_bw.h"
+#include "display/intel_cdclk.h"
+#include "display/intel_display_driver.h"
+#include "display/intel_display.h"
+#include "display/intel_dmc.h"
+#include "display/intel_dp.h"
+#include "display/intel_dpt.h"
+#include "display/intel_encoder.h"
+#include "display/intel_fbdev.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_overlay.h"
+#include "display/intel_pch_refclk.h"
+#include "display/intel_pps.h"
+#include "display/intel_sprite.h"
+#include "display/intel_vga.h"
+#include "display/skl_watermark.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_create.h"
+#include "gem/i915_gem_dmabuf.h"
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_print.h"
+#include "gt/intel_rc6.h"
+
+#include "pxp/intel_pxp.h"
+#include "pxp/intel_pxp_debugfs.h"
+#include "pxp/intel_pxp_pm.h"
+
+#include "soc/intel_dram.h"
+#include "soc/intel_gmch.h"
+
+#include "i915_debugfs.h"
+#include "i915_driver.h"
+#include "i915_drm_client.h"
+#include "i915_drv.h"
+#include "i915_file_private.h"
+#include "i915_getparam.h"
+#include "i915_hwmon.h"
+#include "i915_ioc32.h"
+#include "i915_ioctl.h"
+#include "i915_irq.h"
+#include "i915_memcpy.h"
+#include "i915_perf.h"
+#include "i915_query.h"
+#include "i915_switcheroo.h"
+#include "i915_sysfs.h"
+#include "i915_utils.h"
+#include "i915_vgpu.h"
+#include "intel_clock_gating.h"
+#include "intel_gvt.h"
+#include "intel_memory_region.h"
+#include "intel_pci_config.h"
+#include "intel_pcode.h"
+#include "intel_region_ttm.h"
+#include "vlv_suspend.h"
+
+static const struct drm_driver i915_drm_driver;
+
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+ if (dev_priv->wq == NULL)
+ goto out_err;
+
+ dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->display.hotplug.dp_wq == NULL)
+ goto out_free_wq;
+
+ /*
+	 * The unordered i915 workqueue should be used for all work
+	 * scheduling that does not require running in order. Such work
+	 * used to be scheduled on the system_wq before moving to a driver
+	 * instance due to the deprecation of flush_scheduled_work().
+ */
+ dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
+ if (dev_priv->unordered_wq == NULL)
+ goto out_free_dp_wq;
+
+ return 0;
+
+out_free_dp_wq:
+ destroy_workqueue(dev_priv->display.hotplug.dp_wq);
+out_free_wq:
+ destroy_workqueue(dev_priv->wq);
+out_err:
+ drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
+
+ return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+ destroy_workqueue(dev_priv->unordered_wq);
+ destroy_workqueue(dev_priv->display.hotplug.dp_wq);
+ destroy_workqueue(dev_priv->wq);
+}
+
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ *
+ * Our policy for removing pre-production workarounds is to keep the
+ * current gen workarounds as a guide to the bring-up of the next gen
+ * (workarounds have a habit of persisting!). Anything older than that
+ * should be removed along with the complications they introduce.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+ bool pre = false;
+
+ pre |= IS_HASWELL_EARLY_SDV(dev_priv);
+ pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
+ pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
+ pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
+ pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
+ pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_DG2_G10(dev_priv) && INTEL_REVID(dev_priv) < 0x8;
+ pre |= IS_DG2_G11(dev_priv) && INTEL_REVID(dev_priv) < 0x5;
+ pre |= IS_DG2_G12(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+
+ if (pre) {
+ drm_err(&dev_priv->drm, "This is a pre-production stepping. "
+ "It may not be fully functional.\n");
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+ }
+}
+
+static void sanitize_gpu(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+ struct intel_gt *gt;
+ unsigned int i;
+
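+		/*
+		 * Scrub any pre-existing GPU state by resetting every engine;
+		 * gated above so the reset never clobbers the display.
+		 */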
+ for_each_gt(gt, i915, i)
+ intel_gt_reset_all_engines(gt);
+ }
+}
+
+/**
+ * i915_driver_early_probe - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is "SW-only" state, that is, state not
+ * requiring access to the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ intel_device_info_runtime_init_early(dev_priv);
+
+ intel_step_init(dev_priv);
+
+ intel_uncore_mmio_debug_init_early(dev_priv);
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+
+ mutex_init(&dev_priv->sb_lock);
+ cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
+
+ i915_memcpy_init_early(dev_priv);
+ intel_runtime_pm_init_early(&dev_priv->runtime_pm);
+
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = vlv_suspend_init(dev_priv);
+ if (ret < 0)
+ goto err_workqueues;
+
+ ret = intel_region_ttm_device_init(dev_priv);
+ if (ret)
+ goto err_ttm;
+
+ ret = intel_root_gt_init_early(dev_priv);
+ if (ret < 0)
+ goto err_rootgt;
+
+ i915_gem_init_early(dev_priv);
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev_priv);
+
+ intel_irq_init(dev_priv);
+ intel_display_driver_early_probe(dev_priv);
+ intel_clock_gating_hooks_init(dev_priv);
+
+ intel_detect_preproduction_hw(dev_priv);
+
+ return 0;
+
+err_rootgt:
+ intel_region_ttm_device_fini(dev_priv);
+err_ttm:
+ vlv_suspend_cleanup(dev_priv);
+err_workqueues:
+ i915_workqueues_cleanup(dev_priv);
+ return ret;
+}
+
+/**
+ * i915_driver_late_release - cleanup the setup done in
+ * i915_driver_early_probe()
+ * @dev_priv: device private
+ */
+static void i915_driver_late_release(struct drm_i915_private *dev_priv)
+{
+ intel_irq_fini(dev_priv);
+ intel_power_domains_cleanup(dev_priv);
+ i915_gem_cleanup_early(dev_priv);
+ intel_gt_driver_late_release_all(dev_priv);
+ intel_region_ttm_device_fini(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
+ i915_workqueues_cleanup(dev_priv);
+
+ cpu_latency_qos_remove_request(&dev_priv->sb_qos);
+ mutex_destroy(&dev_priv->sb_lock);
+
+ i915_params_free(&dev_priv->params);
+}
+
+/**
+ * i915_driver_mmio_probe - setup device MMIO
+ * @dev_priv: device private
+ *
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
+ */
+static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
+{
+ struct intel_gt *gt;
+ int ret, i;
+
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ ret = intel_gmch_bridge_setup(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_uncore_init_mmio(gt->uncore);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(&dev_priv->drm,
+ intel_uncore_fini_mmio,
+ gt->uncore);
+ if (ret)
+ return ret;
+ }
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_gmch_bar_setup(dev_priv);
+ intel_device_info_runtime_init(dev_priv);
+ intel_display_device_info_runtime_init(dev_priv);
+
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_gt_init_mmio(gt);
+ if (ret)
+ goto err_uncore;
+ }
+
+ /* As early as possible, scrub existing GPU state before clobbering */
+ sanitize_gpu(dev_priv);
+
+ return 0;
+
+err_uncore:
+ intel_gmch_bar_teardown(dev_priv);
+
+ return ret;
+}
+
+/**
+ * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
+ * @dev_priv: device private
+ */
+static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
+{
+ intel_gmch_bar_teardown(dev_priv);
+}
+
+/**
+ * i915_set_dma_info - set all relevant PCI dma info as configured for the
+ * platform
+ * @i915: valid i915 instance
+ *
+ * Set the dma max segment size, device and coherent masks. The dma mask set
+ * needs to occur before i915_ggtt_probe_hw.
+ *
+ * A couple of platforms have special needs. Address them as well.
+ *
+ */
+static int i915_set_dma_info(struct drm_i915_private *i915)
+{
+ unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
+ int ret;
+
+ GEM_BUG_ON(!mask_size);
+
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
+
+ ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (GRAPHICS_VER(i915) == 2)
+ mask_size = 30;
+
+ /*
+ * 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_I965G(i915) || IS_I965GM(i915))
+ mask_size = 32;
+
+ ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ return 0;
+
+mask_err:
+ drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
+ return ret;
+}
+
+static int i915_pcode_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ int id, ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_pcode_init(gt->uncore);
+ if (ret) {
+ gt_err(gt, "intel_pcode_init failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i915_driver_hw_probe - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+{
+ struct intel_display *display = &dev_priv->display;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ int ret;
+
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ if (HAS_PPGTT(dev_priv)) {
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_full_ppgtt(dev_priv)) {
+ drm_err(&dev_priv->drm,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
+ return -ENXIO;
+ }
+ }
+
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ drm_err(&dev_priv->drm,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
+ /* needs to be done before ggtt probe */
+ intel_dram_edram_detect(dev_priv);
+
+ ret = i915_set_dma_info(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_perf_init(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_ggtt_probe_hw(dev_priv);
+ if (ret)
+ goto err_perf;
+
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
+ if (ret)
+ goto err_ggtt;
+
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
+ goto err_ggtt;
+
+ /*
+ * Make sure we probe lmem before we probe stolen-lmem. The BAR size
+ * might be different due to bar resizing.
+ */
+ ret = intel_gt_tiles_init(dev_priv);
+ if (ret)
+ goto err_ggtt;
+
+ ret = intel_memory_regions_hw_probe(dev_priv);
+ if (ret)
+ goto err_ggtt;
+
+ ret = i915_ggtt_enable_hw(dev_priv);
+ if (ret) {
+ drm_err(&dev_priv->drm, "failed to enable GGTT\n");
+ goto err_mem_regions;
+ }
+
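+	/* Enable PCI bus mastering so the device can perform DMA. */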
+ pci_set_master(pdev);
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ *
+	 * According to chipset errata, on the 965GM, MSI interrupts may
+	 * be lost or delayed and MSI was defeatured. MSI interrupts seem to
+ * get lost on g4x as well, and interrupt delivery seems to stay
+ * properly dead afterwards. So we'll just disable them for all
+ * pre-gen5 chipsets.
+ *
+	 * dp aux and gmbus irqs on gen4 seem to be able to generate legacy
+ * interrupts even when in MSI mode. This results in spurious
+ * interrupt warnings if the legacy irq no. is shared with another
+ * device. The kernel then disables that interrupt source and so
+ * prevents the other device from working properly.
+ */
+ if (GRAPHICS_VER(dev_priv) >= 5) {
+ if (pci_enable_msi(pdev) < 0)
+			drm_dbg(&dev_priv->drm, "can't enable MSI\n");
+ }
+
+ ret = intel_gvt_init(dev_priv);
+ if (ret)
+ goto err_msi;
+
+ intel_opregion_setup(display);
+
+ ret = i915_pcode_init(dev_priv);
+ if (ret)
+ goto err_opregion;
+
+ /*
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
+ */
+ intel_dram_detect(dev_priv);
+
+ intel_bw_init_hw(dev_priv);
+
+ return 0;
+
+err_opregion:
+ intel_opregion_cleanup(display);
+err_msi:
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+err_mem_regions:
+ intel_memory_regions_driver_release(dev_priv);
+err_ggtt:
+ i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
+err_perf:
+ i915_perf_fini(dev_priv);
+ return ret;
+}
+
+/**
+ * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
+ * @dev_priv: device private
+ */
+static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
+{
+ struct intel_display *display = &dev_priv->display;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
+ i915_perf_fini(dev_priv);
+
+ intel_opregion_cleanup(display);
+
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+ struct intel_gt *gt;
+ unsigned int i;
+
+ i915_gem_driver_register(dev_priv);
+ i915_pmu_register(dev_priv);
+
+ intel_vgpu_register(dev_priv);
+
+ /* Reveal our presence to userspace */
+ if (drm_dev_register(&dev_priv->drm, 0)) {
+ drm_err(&dev_priv->drm,
+ "Failed to register driver for userspace access!\n");
+ return;
+ }
+
+ i915_debugfs_register(dev_priv);
+ i915_setup_sysfs(dev_priv);
+
+ /* Depends on sysfs having been initialized */
+ i915_perf_register(dev_priv);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_register(gt);
+
+ intel_pxp_debugfs_register(dev_priv->pxp);
+
+ i915_hwmon_register(dev_priv);
+
+ intel_display_driver_register(dev_priv);
+
+ intel_power_domains_enable(dev_priv);
+ intel_runtime_pm_enable(&dev_priv->runtime_pm);
+
+ intel_register_dsm_handler();
+
+ if (i915_switcheroo_register(dev_priv))
+ drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+ struct intel_gt *gt;
+ unsigned int i;
+
+ i915_switcheroo_unregister(dev_priv);
+
+ intel_unregister_dsm_handler();
+
+ intel_runtime_pm_disable(&dev_priv->runtime_pm);
+ intel_power_domains_disable(dev_priv);
+
+ intel_display_driver_unregister(dev_priv);
+
+ intel_pxp_fini(dev_priv);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_unregister(gt);
+
+ i915_hwmon_unregister(dev_priv);
+
+ i915_perf_unregister(dev_priv);
+ i915_pmu_unregister(dev_priv);
+
+ i915_teardown_sysfs(dev_priv);
+ drm_dev_unplug(&dev_priv->drm);
+
+ i915_gem_driver_unregister(dev_priv);
+}
+
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
+{
+ drm_printf(p, "iommu: %s\n",
+ str_enabled_disabled(i915_vtd_active(i915)));
+}
+
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
+ struct drm_printer p = drm_dbg_printer(&dev_priv->drm, DRM_UT_DRIVER,
+ "device info:");
+ struct intel_gt *gt;
+ unsigned int i;
+
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ intel_subplatform(RUNTIME_INFO(dev_priv),
+ INTEL_INFO(dev_priv)->platform),
+ GRAPHICS_VER(dev_priv));
+
+ intel_device_info_print(INTEL_INFO(dev_priv),
+ RUNTIME_INFO(dev_priv), &p);
+ i915_print_iommu_status(dev_priv, &p);
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_info_print(&gt->info, &p);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+ drm_info(&dev_priv->drm,
+ "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+}
+
+static struct drm_i915_private *
+i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
+ struct drm_i915_private *i915;
+
+ i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915))
+ return i915;
+
+ pci_set_drvdata(pdev, &i915->drm);
+
+ /* Device parameters start as a copy of module parameters. */
+ i915_params_copy(&i915->params, &i915_modparams);
+
+ /* Set up device info and initial runtime info. */
+ intel_device_info_driver_create(i915, pdev->device, match_info);
+
+ intel_display_device_probe(i915);
+
+ return i915;
+}
+
+/**
+ * i915_driver_probe - setup chip and create an initial config
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
+ *
+ * The driver probe routine has to do several things:
+ * - drive output discovery via intel_display_driver_probe()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - set up the DRM framebuffer with the allocated memory
+ */
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct drm_i915_private *i915;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_err("Failed to enable graphics device: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ i915 = i915_driver_create(pdev, ent);
+ if (IS_ERR(i915)) {
+ pci_disable_device(pdev);
+ return PTR_ERR(i915);
+ }
+
+ ret = i915_driver_early_probe(i915);
+ if (ret < 0)
+ goto out_pci_disable;
+
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ intel_vgpu_detect(i915);
+
+ ret = intel_gt_probe_all(i915);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_hw_probe(i915);
+ if (ret < 0)
+ goto out_cleanup_mmio;
+
+ ret = intel_display_driver_probe_noirq(i915);
+ if (ret < 0)
+ goto out_cleanup_hw;
+
+ ret = intel_irq_install(i915);
+ if (ret)
+ goto out_cleanup_modeset;
+
+ ret = intel_display_driver_probe_nogem(i915);
+ if (ret)
+ goto out_cleanup_irq;
+
+ ret = i915_gem_init(i915);
+ if (ret)
+ goto out_cleanup_modeset2;
+
+ ret = intel_pxp_init(i915);
+ if (ret && ret != -ENODEV)
+ drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
+
+ ret = intel_display_driver_probe(i915);
+ if (ret)
+ goto out_cleanup_gem;
+
+ i915_driver_register(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ i915_welcome_messages(i915);
+
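+ /* Probe succeeded; allow i915_driver_release() to tear everything down. */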
+ i915->do_release = true;
+
+ return 0;
+
+out_cleanup_gem:
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
+out_cleanup_modeset2:
+ /* FIXME clean up the error path */
+ intel_display_driver_remove(i915);
+ intel_irq_uninstall(i915);
+ intel_display_driver_remove_noirq(i915);
+ goto out_cleanup_modeset;
+out_cleanup_irq:
+ intel_irq_uninstall(i915);
+out_cleanup_modeset:
+ intel_display_driver_remove_nogem(i915);
+out_cleanup_hw:
+ i915_driver_hw_remove(i915);
+ intel_memory_regions_driver_release(i915);
+ i915_ggtt_driver_release(i915);
+ i915_gem_drain_freed_objects(i915);
+ i915_ggtt_driver_late_release(i915);
+out_cleanup_mmio:
+ i915_driver_mmio_release(i915);
+out_runtime_pm_put:
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
+out_pci_disable:
+ pci_disable_device(pdev);
+ i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
+ return ret;
+}
+
+void i915_driver_remove(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ i915_driver_unregister(i915);
+
+ /* Flush any external code that may still be under the RCU lock */
+ synchronize_rcu();
+
+ i915_gem_suspend(i915);
+
+ intel_gvt_driver_remove(i915);
+
+ intel_display_driver_remove(i915);
+
+ intel_irq_uninstall(i915);
+
+ intel_display_driver_remove_noirq(i915);
+
+ i915_reset_error_state(i915);
+ i915_gem_driver_remove(i915);
+
+ intel_display_driver_remove_nogem(i915);
+
+ i915_driver_hw_remove(i915);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
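+/*
+ * Final drm_device release callback: frees the resources still held after
+ * i915_driver_remove(), once the last reference to the DRM device is gone.
+ * Skipped when probe never got far enough to set do_release.
+ */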
+static void i915_driver_release(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t wakeref;
+
+ if (!dev_priv->do_release)
+ return;
+
+ wakeref = intel_runtime_pm_get(rpm);
+
+ i915_gem_driver_release(dev_priv);
+
+ intel_memory_regions_driver_release(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
+
+ i915_driver_mmio_release(dev_priv);
+
+ intel_runtime_pm_put(rpm, wakeref);
+
+ intel_runtime_pm_driver_release(rpm);
+
+ i915_driver_late_release(dev_priv);
+
+ intel_display_device_remove(dev_priv);
+}
+
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int ret;
+
+ ret = i915_gem_open(i915, file);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ i915_gem_context_close(file);
+ i915_drm_client_put(file_priv->client);
+
+ kfree_rcu(file_priv, rcu);
+
+ /* Catch up with all the deferred frees from "this" client */
+ i915_gem_flush_free_objects(to_i915(dev));
+}
+
+void i915_driver_shutdown(struct drm_i915_private *i915)
+{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_disable(&i915->runtime_pm);
+
+ intel_display_driver_shutdown(i915);
+
+ intel_irq_suspend(i915);
+
+ intel_display_driver_shutdown_noirq(i915);
+
+ i915_gem_suspend(i915);
+
+ /* TODO:
+ * - unify the pci_driver::shutdown sequence here with the
+ * pci_driver.driver.pm.poweroff,poweroff_late sequence.
+ * - unify the driver remove and system/runtime suspend sequences with
+ * the above unified shutdown/poweroff sequence.
+ */
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ intel_runtime_pm_driver_last_release(&i915->runtime_pm);
+}
+
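+/*
+ * s2idle (suspend-to-idle) is in use when the ACPI target sleep state is
+ * shallower than S3; without ACPI sleep support assume a full suspend.
+ */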
+static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+{
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ return true;
+#endif
+ return false;
+}
+
+static void i915_drm_complete(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_pxp_resume_complete(i915->pxp);
+}
+
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_pxp_suspend_prepare(i915->pxp);
+
+ /*
+ * NB intel_display_driver_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after point,
+ * the GPU is not woken again.
+ */
+ return i915_gem_backup_suspend(i915);
+}
+
+static int i915_drm_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
+	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ /*
+ * We do a lot of poking in a lot of registers, so make sure they
+ * work properly.
+ */
+ intel_power_domains_disable(dev_priv);
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+ if (HAS_DISPLAY(dev_priv)) {
+ drm_kms_helper_poll_disable(dev);
+ intel_display_driver_disable_user_access(dev_priv);
+ }
+
+ pci_save_state(pdev);
+
+ intel_display_driver_suspend(dev_priv);
+
+ intel_irq_suspend(dev_priv);
+
+ intel_display_driver_suspend_noirq(dev_priv);
+
+ i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
+
+ i9xx_display_sr_save(dev_priv);
+
+ intel_display_driver_suspend_noggtt(display, suspend_to_idle(dev_priv));
+
+ dev_priv->suspend_count++;
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ i915_gem_drain_freed_objects(dev_priv);
+
+ return 0;
+}
+
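+/*
+ * Late suspend: power down the GT/uncore, the power domains and finally the
+ * PCI device itself. On pre-GEN6 hibernation the device is left in D0 to
+ * avoid BIOS hangs (see the comment below).
+ */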
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct intel_gt *gt;
+ int ret, i;
+ bool s2idle = !hibernation && suspend_to_idle(dev_priv);
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ intel_pxp_suspend(dev_priv->pxp);
+
+ i915_gem_suspend_late(dev_priv);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
+
+ intel_power_domains_suspend(dev_priv, s2idle);
+
+ intel_display_power_suspend_late(dev_priv);
+
+ ret = vlv_suspend_complete(dev_priv);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
+ intel_power_domains_resume(dev_priv);
+
+ goto out;
+ }
+
+ pci_disable_device(pdev);
+ /*
+ * During hibernation on some platforms the BIOS may try to access
+ * the device even though it's already in D3 and hang the machine. So
+ * leave the device in D0 on those platforms and hope the BIOS will
+ * power down the device properly. The issue was seen on multiple old
+ * GENs with different BIOS vendors, so having an explicit blacklist
+ * is impractical; apply the workaround on everything pre-GEN6. The
+ * platforms where the issue was seen:
+ * Lenovo Thinkpad X301, X61s, X60, T60, X41
+ * Fujitsu FSC S7110
+ * Acer Aspire 1830T
+ */
+ if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
+ pci_set_power_state(pdev, PCI_D3hot);
+
+out:
+ enable_rpm_wakeref_asserts(rpm);
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
+
+ return ret;
+}
+
+int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
+ pm_message_t state)
+{
+ int error;
+
+ if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
+ return -EINVAL;
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ error = i915_drm_suspend(&i915->drm);
+ if (error)
+ return error;
+
+ return i915_drm_suspend_late(&i915->drm, false);
+}
+
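+/*
+ * Full resume: re-initialize pcode and the GGTT, restore interrupts, GEM and
+ * the display, then re-enable power domains and hotplug/output polling.
+ */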
+static int i915_drm_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
+ struct intel_gt *gt;
+ int ret, i;
+
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ ret = i915_pcode_init(dev_priv);
+ if (ret)
+ return ret;
+
+ sanitize_gpu(dev_priv);
+
+ ret = i915_ggtt_enable_hw(dev_priv);
+ if (ret)
+ drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
+
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
+
+ for_each_gt(gt, dev_priv, i)
+ if (GRAPHICS_VER(gt->i915) >= 8)
+ setup_private_pat(gt);
+
+ /* Must be called after GGTT is resumed. */
+ intel_dpt_resume(dev_priv);
+
+ intel_dmc_resume(display);
+
+ i9xx_display_sr_restore(dev_priv);
+
+ intel_vga_redisable(display);
+
+ intel_gmbus_reset(dev_priv);
+
+ intel_pps_unlock_regs_wa(display);
+
+ intel_init_pch_refclk(dev_priv);
+
+ /*
+ * Interrupts have to be enabled before any batches are run. If not, the
+ * GPU will hang. i915_gem_init_hw() will initiate batches to
+ * update/restore the context.
+ *
+ * drm_mode_config_reset() needs AUX interrupts.
+ *
+ * Modeset enabling in intel_display_driver_init_hw() also needs working
+ * interrupts.
+ */
+ intel_irq_resume(dev_priv);
+
+ if (HAS_DISPLAY(dev_priv))
+ drm_mode_config_reset(dev);
+
+ i915_gem_resume(dev_priv);
+
+ intel_display_driver_init_hw(dev_priv);
+
+ intel_clock_gating_init(dev_priv);
+
+ if (HAS_DISPLAY(dev_priv))
+ intel_display_driver_resume_access(dev_priv);
+
+ intel_hpd_init(dev_priv);
+
+ intel_display_driver_resume(dev_priv);
+
+ if (HAS_DISPLAY(dev_priv)) {
+ intel_display_driver_enable_user_access(dev_priv);
+ drm_kms_helper_poll_enable(dev);
+ }
+ intel_hpd_poll_disable(dev_priv);
+
+ intel_opregion_resume(display);
+
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+
+ intel_power_domains_enable(dev_priv);
+
+ intel_gvt_resume(dev_priv);
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ return 0;
+}
+
+static int i915_drm_resume_early(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct intel_gt *gt;
+ int ret, i;
+
+ /*
+ * We have a resume ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with an early
+ * resume hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+
+ /*
+ * Note that we need to set the power state explicitly, since we
+ * powered off the device during freeze and the PCI core won't power
+ * it back up for us during thaw. Powering off the device during
+ * freeze is not a hard requirement though, and during the
+ * suspend/resume phases the PCI core makes sure we get here with the
+ * device powered on. So in case we change our freeze logic and keep
+ * the device powered we can also remove the following set power state
+ * call.
+ */
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "failed to set PCI D0 power state (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Note that pci_enable_device() first enables any parent bridge
+ * device and only then sets the power state for this device. The
+ * bridge enabling is a nop though, since bridge devices are resumed
+ * first. The order of enabling power and enabling the device is
+ * imposed by the PCI core as described above, so here we preserve the
+ * same order for the freeze/thaw phases.
+ *
+ * TODO: eventually we should remove pci_disable_device() /
+ * pci_enable_device() from suspend/resume. Due to how they
+ * depend on the device enable refcount we can't anyway depend on them
+ * disabling/enabling the device.
+ */
+ if (pci_enable_device(pdev))
+ return -EIO;
+
+ pci_set_master(pdev);
+
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ ret = vlv_resume_prepare(dev_priv, false);
+ if (ret)
+ drm_err(&dev_priv->drm,
+ "Resume prepare failed: %d, continuing anyway\n", ret);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_resume_early(gt);
+
+ intel_display_power_resume_early(dev_priv);
+
+ intel_power_domains_resume(dev_priv);
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ return ret;
+}
+
+int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ ret = i915_drm_resume_early(&i915->drm);
+ if (ret)
+ return ret;
+
+ return i915_drm_resume(&i915->drm);
+}
+
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (!i915) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(&i915->drm);
+}
+
+static int i915_pm_suspend(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (!i915) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend(&i915->drm);
+}
+
+static int i915_pm_suspend_late(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ /*
+ * We have a suspend ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with a late
+ * suspend hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_late(&i915->drm, false);
+}
+
+static int i915_pm_poweroff_late(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_late(&i915->drm, true);
+}
+
+static int i915_pm_resume_early(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_resume_early(&i915->drm);
+}
+
+static int i915_pm_resume(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_resume(&i915->drm);
+}
+
+static void i915_pm_complete(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return;
+
+ i915_drm_complete(&i915->drm);
+}
+
+/* freeze: before creating the hibernation image */
+static int i915_pm_freeze(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ int ret;
+
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(&i915->drm);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_freeze(i915);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int i915_pm_freeze_late(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ int ret;
+
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(&i915->drm, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_freeze_late(i915);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *kdev)
+{
+ return i915_pm_resume_early(kdev);
+}
+
+static int i915_pm_thaw(struct device *kdev)
+{
+ return i915_pm_resume(kdev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *kdev)
+{
+ return i915_pm_resume_early(kdev);
+}
+
+static int i915_pm_restore(struct device *kdev)
+{
+ return i915_pm_resume(kdev);
+}
+
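+/*
+ * Runtime (autosuspend) path: quiesce GEM, PXP and the GTs, suspend
+ * interrupts, uncore and display power, then notify the opregion of the
+ * new power state.
+ */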
+static int intel_runtime_suspend(struct device *kdev)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_display *display = &dev_priv->display;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
+ struct intel_gt *gt;
+ int ret, i;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
+ return -ENODEV;
+
+ drm_dbg(&dev_priv->drm, "Suspending device\n");
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ /*
+ * We are safe here against re-faults, since the fault handler takes
+ * an RPM reference.
+ */
+ i915_gem_runtime_suspend(dev_priv);
+
+ intel_pxp_runtime_suspend(dev_priv->pxp);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_suspend(gt);
+
+ intel_irq_suspend(dev_priv);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
+
+ intel_display_power_suspend(dev_priv);
+
+ ret = vlv_suspend_complete(dev_priv);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
+
+ intel_irq_resume(dev_priv);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_resume(gt);
+
+ enable_rpm_wakeref_asserts(rpm);
+
+ return ret;
+ }
+
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_driver_release(rpm);
+
+ if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
+ drm_err(&dev_priv->drm,
+ "Unclaimed access detected prior to suspending\n");
+
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM.
+ */
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_disable(root_pdev);
+
+ /*
+ * FIXME: We really should find a document that references the arguments
+ * used below!
+ */
+ if (IS_BROADWELL(dev_priv)) {
+ /*
+ * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+ * being detected, and the call we do at intel_runtime_resume()
+ * won't be able to restore them. Since PCI_D3hot matches the
+ * actual specification and appears to be working, use it.
+ */
+ intel_opregion_notify_adapter(display, PCI_D3hot);
+ } else {
+ /*
+ * current versions of firmware which depend on this opregion
+ * notification have repurposed the D1 definition to mean
+ * "runtime suspended" vs. what you would normally expect (D3)
+ * to distinguish it from notifications that might be sent via
+ * the suspend path.
+ */
+ intel_opregion_notify_adapter(display, PCI_D1);
+ }
+
+ assert_forcewakes_inactive(&dev_priv->uncore);
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ intel_hpd_poll_enable(dev_priv);
+
+ drm_dbg(&dev_priv->drm, "Device suspended\n");
+ return 0;
+}
+
+static int intel_runtime_resume(struct device *kdev)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_display *display = &dev_priv->display;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
+ struct intel_gt *gt;
+ int ret, i;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
+ return -ENODEV;
+
+ drm_dbg(&dev_priv->drm, "Resuming device\n");
+
+ drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
+ disable_rpm_wakeref_asserts(rpm);
+
+ intel_opregion_notify_adapter(display, PCI_D0);
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_enable(root_pdev);
+
+ if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
+ drm_dbg(&dev_priv->drm,
+ "Unclaimed access during suspend, bios?\n");
+
+ intel_display_power_resume(dev_priv);
+
+ ret = vlv_resume_prepare(dev_priv, true);
+
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_runtime_resume(gt->uncore);
+
+ intel_irq_resume(dev_priv);
+
+ /*
+ * No point in rolling things back in case of an error, as the best
+ * we can do is to hope that things will still work (and disable RPM).
+ */
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_resume(gt);
+
+ intel_pxp_runtime_resume(dev_priv->pxp);
+
+ /*
+ * On VLV/CHV display interrupts are part of the display
+ * power well, so hpd is reinitialized from there. For
+ * everyone else do it here.
+ */
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
+ }
+
+ skl_watermark_ipc_update(dev_priv);
+
+ enable_rpm_wakeref_asserts(rpm);
+
+ if (ret)
+ drm_err(&dev_priv->drm,
+ "Runtime resume failed, disabling it (%d)\n", ret);
+ else
+ drm_dbg(&dev_priv->drm, "Device resumed\n");
+
+ return ret;
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+ /*
+ * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+ * PMSG_RESUME]
+ */
+ .prepare = i915_pm_prepare,
+ .suspend = i915_pm_suspend,
+ .suspend_late = i915_pm_suspend_late,
+ .resume_early = i915_pm_resume_early,
+ .resume = i915_pm_resume,
+ .complete = i915_pm_complete,
+
+ /*
+ * S4 event handlers
+ * @freeze, @freeze_late : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw, @thaw_early : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff, @poweroff_late: called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore, @restore_early : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
+ */
+ .freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_suspend,
+ .poweroff_late = i915_pm_poweroff_late,
+ .restore_early = i915_pm_restore_early,
+ .restore = i915_pm_restore,
+
+ /* S0ix (via runtime suspend) event handlers */
+ .runtime_suspend = intel_runtime_suspend,
+ .runtime_resume = intel_runtime_resume,
+};
+
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release_noglobal,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = i915_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
+ .llseek = noop_llseek,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_fdinfo,
+#endif
+};
+
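+/* The legacy GEM pin/unpin ioctls are no longer supported; always reject. */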
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
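+/*
+ * Ioctl table: long-dead DRI1 entry points are kept as drm_noop()/
+ * drm_invalid_op() stubs so their ioctl numbers stay reserved, while the
+ * GEM, context, perf and query ioctls map to their real handlers.
+ */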
+static const struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+};
+
+/*
+ * Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 0
+
+static const struct drm_driver i915_drm_driver = {
+ /* Don't use MTRRs here; the Xserver or userspace app should
+ * deal with them for Intel hardware.
+ */
+ .driver_features =
+ DRIVER_GEM |
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
+ .release = i915_driver_release,
+ .open = i915_driver_open,
+ .postclose = i915_driver_postclose,
+ .show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
+
+ .gem_prime_import = i915_gem_prime_import,
+
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_dumb_mmap_offset,
+
+ .ioctls = i915_ioctls,
+ .num_ioctls = ARRAY_SIZE(i915_ioctls),
+ .fops = &i915_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};