author	Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>	2024-04-27 10:27:33 +0300
committer	Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>	2024-04-27 10:27:33 +0300
commit	3aa4574f6f41a255808b42b3be20de7a5cfca450 (patch)
tree	6b788272ab796a4fad87bc703847a0a92d794bd2
parent	77564159b9f0c66252a00333321f005b93e170bb (diff)
parent	98ad158e543426561fa5df5c4387d4368601866f (diff)
Merge remote-tracking branch 'drm-xe/drm-xe-next' into drm-tip
# Conflicts:
#	drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
-rw-r--r--  drivers/gpu/drm/xe/Makefile  2
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h  26
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h  170
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h  3
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h  1
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h  16
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_sriov_regs.h  3
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c  232
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c  86
-rw-r--r--  drivers/gpu/drm/xe/xe_debugfs.c  55
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c  44
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h  7
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h  8
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.h  9
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c  15
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_debugfs.c  5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.h  24
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c  16
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf.c  32
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf.h  5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c  18
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c  348
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h  18
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c  550
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h  36
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_service_types.h  52
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h  5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c  61
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h  3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_topology.c  10
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_topology.h  3
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c  50
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.h  2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads.c  65
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads.h  1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_db_mgr.c  3
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_id_mgr.c  3
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c  3
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_relay.c  8
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c  148
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.h  2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_types.h  9
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c  134
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_types.h  20
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c  4
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.c  38
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.h  7
-rw-r--r--  drivers/gpu/drm/xe/xe_module.c  5
-rw-r--r--  drivers/gpu/drm/xe/xe_module.h  1
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_preempt_fence.c  14
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c  25
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c  15
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.h  1
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.c  12
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.h  2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c  715
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h  2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h  33
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c  5
60 files changed, 2648 insertions(+), 544 deletions(-)
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 8321ec4f9b46..a67977edff5b 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -163,7 +163,9 @@ xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
xe_gt_sriov_pf_config.o \
xe_gt_sriov_pf_control.o \
+ xe_gt_sriov_pf_debugfs.o \
xe_gt_sriov_pf_policy.o \
+ xe_gt_sriov_pf_service.o \
xe_lmtt.o \
xe_lmtt_2l.o \
xe_lmtt_ml.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 511cf974d585..5c1d40432ca0 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -194,14 +194,18 @@ enum {
* granularity) since the GPUs clock time runs off a different crystal
* from the CPUs clock. Changing this KLV on a VF that is currently
* running a context wont take effect until a new context is scheduled in.
- * That said, when the PF is changing this value from 0xFFFFFFFF to
- * something else, it might never take effect if the VF is running an
- * inifinitely long compute or shader kernel. In such a scenario, the
+ * That said, when the PF is changing this value from 0x0 to
+ * a non-zero value, it might never take effect if the VF is running an
+ * infinitely long compute or shader kernel. In such a scenario, the
* PF would need to trigger a VM PAUSE and then change the KLV to force
* it to take effect. Such cases might typically happen on a 1PF+1VF
* Virtualization config enabled for heavier workloads like AI/ML.
*
+ * The max value for this KLV is 100 seconds; anything exceeding that
+ * will be clamped to the max.
+ *
* :0: infinite exec quantum (default)
+ * :100000: maximum exec quantum (100000ms == 100s)
*
* _`GUC_KLV_VF_CFG_PREEMPT_TIMEOUT` : 0x8A02
* This config sets the VF-preemption-timeout in microseconds.
@@ -211,15 +215,19 @@ enum {
* different crystal from the CPUs clock. Changing this KLV on a VF
* that is currently running a context wont take effect until a new
* context is scheduled in.
- * That said, when the PF is changing this value from 0xFFFFFFFF to
- * something else, it might never take effect if the VF is running an
- * inifinitely long compute or shader kernel.
+ * That said, when the PF is changing this value from 0x0 to
+ * a non-zero value, it might never take effect if the VF is running an
+ * infinitely long compute or shader kernel.
* In this case, the PF would need to trigger a VM PAUSE and then change
* the KLV to force it to take effect. Such cases might typically happen
* on a 1PF+1VF Virtualization config enabled for heavier workloads like
* AI/ML.
*
+ * The max value for this KLV is 100 seconds; anything exceeding that
+ * will be clamped to the max.
+ *
* :0: no preemption timeout (default)
+ * :100000000: maximum preemption timeout (100000000us == 100s)
*
* _`GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR` : 0x8A03
* This config sets threshold for CAT errors caused by the VF.
@@ -291,9 +299,11 @@ enum {
#define GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY 0x8a01
#define GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN 1u
+#define GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE 100000u
-#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY 0x8a02
-#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN 1u
+#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY 0x8a02
+#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN 1u
+#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE 100000000u
#define GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR_KEY 0x8a03
#define GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR_LEN 1u
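The new *_MAX_VALUE limits above are intended to be applied on the PF side before the KLV is pushed, since the GuC silently clamps anything larger. A minimal sketch of that clamping, using only the macros added in this hunk (the helper names below are illustrative and not part of this series, which instead uses min_t() in xe_gt_sriov_pf_config.c further down):

/* Illustrative only: clamp PF-provided values to the documented KLV maxima. */
static inline u32 pf_clamp_exec_quantum_ms(u32 exec_quantum_ms)
{
	/* 0 means "infinite" and is passed through unchanged */
	if (exec_quantum_ms > GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE)
		return GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE;	/* 100000 ms == 100 s */
	return exec_quantum_ms;
}

static inline u32 pf_clamp_preempt_timeout_us(u32 preempt_timeout_us)
{
	if (preempt_timeout_us > GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE)
		return GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE;	/* 100000000 us == 100 s */
	return preempt_timeout_us;
}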
diff --git a/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
index 747e428de421..6c2834613081 100644
--- a/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
@@ -1,11 +1,179 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2023 Intel Corporation
+ * Copyright © 2023-2024 Intel Corporation
*/
#ifndef _ABI_GUC_RELAY_ACTIONS_ABI_H_
#define _ABI_GUC_RELAY_ACTIONS_ABI_H_
+#include "abi/guc_relay_communication_abi.h"
+
+/**
+ * DOC: GuC Relay VF/PF ABI Version
+ *
+ * The _`GUC_RELAY_VERSION_BASE` defines the minimum VF/PF ABI version that
+ * drivers must support. Currently this is version 1.0.
+ *
+ * The _`GUC_RELAY_VERSION_LATEST` defines the latest VF/PF ABI version that
+ * drivers may use. Currently this is version 1.0.
+ *
+ * Some platforms may require a different base VF/PF ABI version.
+ * No supported VF/PF ABI version can be 0.0.
+ */
+
+#define GUC_RELAY_VERSION_BASE_MAJOR 1
+#define GUC_RELAY_VERSION_BASE_MINOR 0
+
+#define GUC_RELAY_VERSION_LATEST_MAJOR 1
+#define GUC_RELAY_VERSION_LATEST_MINOR 0
+
+/**
+ * DOC: GuC Relay Actions
+ *
+ * The following actions are supported from VF/PF ABI version 1.0:
+ *
+ * * `VF2PF_HANDSHAKE`_
+ * * `VF2PF_QUERY_RUNTIME`_
+ */
+
+/**
+ * DOC: VF2PF_HANDSHAKE
+ *
+ * This `Relay Message`_ is used by the VF to establish ABI version with the PF.
+ *
+ * Prior to exchanging any other messages, both VF driver and PF driver must
+ * negotiate the VF/PF ABI version that will be used in their communication.
+ *
+ * The VF driver shall use @MAJOR and @MINOR fields to pass requested ABI version.
+ * The VF driver may use special version 0.0 (both @MAJOR and @MINOR set to 0)
+ * to request latest (or any) ABI version that is supported by the PF driver.
+ *
+ * This message definition shall be supported by all future ABI versions.
+ * This message definition shall not be changed by future ABI versions.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_RELAY_ACTION_VF2PF_HANDSHAKE` = 0x0001 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:16 | **MAJOR** - requested major version of the VFPF interface |
+ * | | | (use MAJOR_ANY to request latest version supported by PF) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **MINOR** - requested minor version of the VFPF interface |
+ * | | | (use MINOR_ANY to request latest version supported by PF) |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:16 | **MAJOR** - agreed major version of the VFPF interface |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **MINOR** - agreed minor version of the VFPF interface |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_RELAY_ACTION_VF2PF_HANDSHAKE 0x0001u
+
+#define VF2PF_HANDSHAKE_REQUEST_MSG_LEN 2u
+#define VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR (0xffffu << 16)
+#define VF2PF_HANDSHAKE_MAJOR_ANY 0
+#define VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR (0xffffu << 0)
+#define VF2PF_HANDSHAKE_MINOR_ANY 0
+
+#define VF2PF_HANDSHAKE_RESPONSE_MSG_LEN 2u
+#define VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+#define VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR (0xffffu << 16)
+#define VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR (0xffffu << 0)
+
+/**
+ * DOC: VF2PF_QUERY_RUNTIME
+ *
+ * This `Relay Message`_ is used by the VF to query values of runtime registers.
+ *
+ * On some platforms, VF drivers may not have access to some fuse registers
+ * (referred here as 'runtime registers') and therefore VF drivers need to ask
+ * the PF driver to obtain their values.
+ *
+ * However, the list of such registers, and their values, is fully owned and
+ * maintained by the PF driver and the VF driver may only initiate the query
+ * sequence and indicate in the @START field the starting index of the next
+ * requested register from this predefined list.
+ *
+ * In the response, the PF driver will return tuples of a 32-bit register offset
+ * and the 32-bit value of that register (respectively @REG_OFFSET and @REG_VALUE).
+ *
+ * The VF driver can use the @LIMIT field to limit the number of returned register
+ * tuples. If @LIMIT is unset, the PF driver decides how many register tuples to return.
+ *
+ * This message definition is supported from ABI version 1.0.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = **LIMIT** - limit number of returned entries |
+ * | | | (use zero to not enforce any limits on the response) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME` = 0x0101 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | DATA1 = **START** - index of the first requested entry |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = **COUNT** - number of entries included in response |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | DATA1 = **REMAINING** - number of remaining entries |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | DATA2 = **REG_OFFSET** - offset of register[START] |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | DATA3 = **REG_VALUE** - value of register[START] |
+ * +---+-------+--------------------------------------------------------------+
+ * | | | |
+ * +---+-------+--------------------------------------------------------------+
+ * |n-1| 31:0 | REG_OFFSET - offset of register[START + x] |
+ * +---+-------+--------------------------------------------------------------+
+ * | n | 31:0 | REG_VALUE - value of register[START + x] |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME 0x0101u
+
+#define VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN 2u
+#define VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT GUC_HXG_REQUEST_MSG_0_DATA0
+#define VF2PF_QUERY_RUNTIME_NO_LIMIT 0u
+#define VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN (GUC_HXG_MSG_MIN_LEN + 1u)
+#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN \
+ (VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + VF2PF_QUERY_RUNTIME_MAX_COUNT * 2)
+#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0
+#define VF2PF_QUERY_RUNTIME_MIN_COUNT 0
+#define VF2PF_QUERY_RUNTIME_MAX_COUNT \
+ ((GUC_RELAY_MSG_MAX_LEN - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2)
+#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING GUC_HXG_RESPONSE_MSG_n_DATAn
+#define VF2PF_QUERY_RUNTIME_RESPONSE_DATAn_REG_OFFSETx GUC_HXG_RESPONSE_MSG_n_DATAn
+#define VF2PF_QUERY_RUNTIME_RESPONSE_DATAn_REG_VALUEx GUC_HXG_RESPONSE_MSG_n_DATAn
+
/**
* DOC: GuC Relay Debug Actions
*
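To make the HANDSHAKE and QUERY_RUNTIME layouts above easier to follow, here is a hedged VF-side sketch (not part of this commit). It assumes the generic HXG field macros from abi/guc_messages_abi.h and FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; save_runtime_reg() is a hypothetical VF-side cache helper.

/* Illustrative: encode a VF2PF_HANDSHAKE request asking for the PF's latest ABI. */
static void vf2pf_handshake_prepare(u32 *msg)
{
	/* msg[] holds VF2PF_HANDSHAKE_REQUEST_MSG_LEN (2) dwords */
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE);
	msg[1] = FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, VF2PF_HANDSHAKE_MAJOR_ANY) |
		 FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, VF2PF_HANDSHAKE_MINOR_ANY);
}

/* Illustrative: walk the (REG_OFFSET, REG_VALUE) pairs of a QUERY_RUNTIME response. */
static u32 vf2pf_query_runtime_parse(const u32 *rsp)
{
	u32 count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, rsp[0]);
	u32 i;

	for (i = 0; i < count; i++) {
		u32 offset = rsp[2 + 2 * i];	/* REG_OFFSET of register[START + i] */
		u32 value = rsp[3 + 2 * i];	/* REG_VALUE of register[START + i] */

		save_runtime_reg(offset, value);	/* hypothetical VF-side cache */
	}

	/* dword 1 carries REMAINING; if non-zero, re-issue with START advanced by count */
	return rsp[1];
}

In practice the VF driver repeats the query, advancing START by the returned COUNT, until REMAINING reaches zero.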
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index ffaa4d2f1eed..cd4632276141 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -78,7 +78,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
+#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
+ IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index af71b87d8030..97d2aed63e01 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -65,6 +65,7 @@
#define RING_ACTHD_UDW(base) XE_REG((base) + 0x5c)
#define RING_DMA_FADD_UDW(base) XE_REG((base) + 0x60)
#define RING_IPEHR(base) XE_REG((base) + 0x68)
+#define RING_INSTDONE(base) XE_REG((base) + 0x6c)
#define RING_ACTHD(base) XE_REG((base) + 0x74)
#define RING_DMA_FADD(base) XE_REG((base) + 0x78)
#define RING_HWS_PGA(base) XE_REG((base) + 0x80)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 94445810ccc9..83847f2da72a 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -98,6 +98,8 @@
#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+#define XEHPG_INSTDONE_GEOM_SVGUNIT XE_REG_MCR(0x666c)
+
#define CACHE_MODE_1 XE_REG(0x7004, XE_REG_OPTION_MASKED)
#define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11)
@@ -115,6 +117,14 @@
#define FLSH_IGNORES_PSD REG_BIT(10)
#define FD_END_COLLECT REG_BIT(5)
+#define SC_INSTDONE XE_REG(0x7100)
+#define SC_INSTDONE_EXTRA XE_REG(0x7104)
+#define SC_INSTDONE_EXTRA2 XE_REG(0x7108)
+
+#define XEHPG_SC_INSTDONE XE_REG_MCR(0x7100)
+#define XEHPG_SC_INSTDONE_EXTRA XE_REG_MCR(0x7104)
+#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
+
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
@@ -173,8 +183,11 @@
#define MAX_MSLICES 4
#define MEML3_EN_MASK REG_GENMASK(3, 0)
+#define MIRROR_FUSE1 XE_REG(0x911c)
+
#define XELP_EU_ENABLE XE_REG(0x9134) /* "_DISABLE" on Xe_LP */
#define XELP_EU_MASK REG_GENMASK(7, 0)
+#define XELP_GT_SLICE_ENABLE XE_REG(0x9138)
#define XELP_GT_GEOMETRY_DSS_ENABLE XE_REG(0x913c)
#define GT_VEBOX_VDBOX_DISABLE XE_REG(0x9140)
@@ -342,6 +355,9 @@
#define HALF_SLICE_CHICKEN5 XE_REG_MCR(0xe188, XE_REG_OPTION_MASKED)
#define DISABLE_SAMPLE_G_PERFORMANCE REG_BIT(0)
+#define SAMPLER_INSTDONE XE_REG_MCR(0xe160)
+#define ROW_INSTDONE XE_REG_MCR(0xe164)
+
#define SAMPLER_MODE XE_REG_MCR(0xe18c, XE_REG_OPTION_MASKED)
#define ENABLE_SMALLPL REG_BIT(15)
#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
index 617ddb84b7fa..017b4ddd1ecf 100644
--- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
@@ -14,6 +14,9 @@
#define LMEM_EN REG_BIT(31)
#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
+#define GUEST_GTT_UPDATE_EN REG_BIT(8)
+
#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
#define VF_CAP REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
new file mode 100644
index 000000000000..b683585db852
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+static int pf_service_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+ struct xe_gt *gt;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ gt = xe_device_get_gt(xe, 0);
+ pf_init_versions(gt);
+
+ /*
+ * sanity check:
+ * - all supported platforms VF/PF ABI versions must be defined
+ * - base version can't be newer than latest
+ */
+ KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.latest.major);
+ if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor,
+ gt->sriov.pf.service.version.latest.minor);
+
+ test->priv = gt;
+ return 0;
+}
+
+static void pf_negotiate_any(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt, VF2PF_HANDSHAKE_MAJOR_ANY,
+ VF2PF_HANDSHAKE_MINOR_ANY,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_base_match(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.base.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.base.minor);
+}
+
+static void pf_negotiate_base_newer(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_GE(test, minor, gt->sriov.pf.service.version.base.minor);
+ if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_next(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_LE(test, major, gt->sriov.pf.service.version.latest.major);
+ if (major == gt->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_older(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ if (!gt->sriov.pf.service.version.base.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.base.minor - 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_base_prev(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major - 1, 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_latest_match(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major,
+ gt->sriov.pf.service.version.latest.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_newer(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major,
+ gt->sriov.pf.service.version.latest.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_next(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_older(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ if (!gt->sriov.pf.service.version.latest.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major,
+ gt->sriov.pf.service.version.latest.minor - 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor - 1);
+}
+
+static void pf_negotiate_latest_prev(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
+ kunit_skip(test, "no prev major");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major - 1,
+ gt->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major - 1);
+ KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
+}
+
+static struct kunit_case pf_service_test_cases[] = {
+ KUNIT_CASE(pf_negotiate_any),
+ KUNIT_CASE(pf_negotiate_base_match),
+ KUNIT_CASE(pf_negotiate_base_newer),
+ KUNIT_CASE(pf_negotiate_base_next),
+ KUNIT_CASE(pf_negotiate_base_older),
+ KUNIT_CASE(pf_negotiate_base_prev),
+ KUNIT_CASE(pf_negotiate_latest_match),
+ KUNIT_CASE(pf_negotiate_latest_newer),
+ KUNIT_CASE(pf_negotiate_latest_next),
+ KUNIT_CASE(pf_negotiate_latest_older),
+ KUNIT_CASE(pf_negotiate_latest_prev),
+ {}
+};
+
+static struct kunit_suite pf_service_suite = {
+ .name = "pf_service",
+ .test_cases = pf_service_test_cases,
+ .init = pf_service_test_init,
+};
+
+kunit_test_suite(pf_service_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 977d5f4e4490..b6e7f80c3774 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -62,36 +62,6 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
return 0;
}
-static void
-sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
- struct xe_tile *tile, struct iosys_map *map, void *dst,
- u32 qword_ofs, u32 num_qwords,
- const struct xe_vm_pgtable_update *update)
-{
- struct migrate_test_params *p =
- to_migrate_test_params(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));
- int i;
- u64 *ptr = dst;
- u64 value;
-
- for (i = 0; i < num_qwords; i++) {
- value = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
- if (map)
- xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
- sizeof(u64), u64, value);
- else
- ptr[i] = value;
- }
-
- kunit_info(xe_cur_kunit(), "Used %s.\n", map ? "CPU" : "GPU");
- if (p->force_gpu && map)
- KUNIT_FAIL(xe_cur_kunit(), "GPU pagetable update used CPU.\n");
-}
-
-static const struct xe_migrate_pt_update_ops sanity_ops = {
- .populate = sanity_populate_cb,
-};
-
#define check(_retval, _expected, str, _test) \
do { if ((_retval) != (_expected)) { \
KUNIT_FAIL(_test, "Sanity check failed: " str \
@@ -209,57 +179,6 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
test_copy(m, bo, test, region);
}
-static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
- struct kunit *test, bool force_gpu)
-{
- struct xe_device *xe = tile_to_xe(m->tile);
- struct dma_fence *fence;
- u64 retval, expected;
- ktime_t then, now;
- int i;
-
- struct xe_vm_pgtable_update update = {
- .ofs = 1,
- .qwords = 0x10,
- .pt_bo = pt,
- };
- struct xe_migrate_pt_update pt_update = {
- .ops = &sanity_ops,
- };
- struct migrate_test_params p = {
- .base.id = XE_TEST_LIVE_MIGRATE,
- .force_gpu = force_gpu,
- };
-
- test->priv = &p;
- /* Test xe_migrate_update_pgtables() updates the pagetable as expected */
- expected = 0xf0f0f0f0f0f0f0f0ULL;
- xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
-
- then = ktime_get();
- fence = xe_migrate_update_pgtables(m, m->q->vm, NULL, m->q, &update, 1,
- NULL, 0, &pt_update);
- now = ktime_get();
- if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
- return;
-
- kunit_info(test, "Updating without syncing took %llu us,\n",
- (unsigned long long)ktime_to_us(ktime_sub(now, then)));
-
- dma_fence_put(fence);
- retval = xe_map_rd(xe, &pt->vmap, 0, u64);
- check(retval, expected, "PTE[0] must stay untouched", test);
-
- for (i = 0; i < update.qwords; i++) {
- retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64);
- check(retval, i * 0x1111111111111111ULL, "PTE update", test);
- }
-
- retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords),
- u64);
- check(retval, expected, "PTE[0x11] must stay untouched", test);
-}
-
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
struct xe_tile *tile = m->tile;
@@ -398,11 +317,6 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
test_copy_vram(m, big, test);
}
- kunit_info(test, "Testing page table update using CPU if GPU idle.\n");
- test_pt_update(m, pt, test, false);
- kunit_info(test, "Testing page table update using GPU\n");
- test_pt_update(m, pt, test, true);
-
out:
xe_bb_free(bb, NULL);
free_tiny:
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 0b7aebaae843..98e3fbde50ea 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -13,6 +13,8 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt_debugfs.h"
+#include "xe_gt_printk.h"
+#include "xe_guc_ads.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
@@ -118,6 +120,56 @@ static const struct file_operations forcewake_all_fops = {
.release = forcewake_release,
};
+static ssize_t wedged_mode_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ char buf[32];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", xe->wedged.mode);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_gt *gt;
+ u32 wedged_mode;
+ ssize_t ret;
+ u8 id;
+
+ ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode);
+ if (ret)
+ return ret;
+
+ if (wedged_mode > 2)
+ return -EINVAL;
+
+ if (xe->wedged.mode == wedged_mode)
+ return 0;
+
+ xe->wedged.mode = wedged_mode;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
+ if (ret) {
+ xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
+ return -EIO;
+ }
+ }
+
+ return size;
+}
+
+static const struct file_operations wedged_mode_fops = {
+ .owner = THIS_MODULE,
+ .read = wedged_mode_show,
+ .write = wedged_mode_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
@@ -135,6 +187,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("forcewake_all", 0400, root, xe,
&forcewake_all_fops);
+ debugfs_create_file("wedged_mode", 0400, root, xe,
+ &wedged_mode_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 5ef9b50a20d0..b61f8356e23e 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -138,6 +138,9 @@ static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct xe_device *xe = to_xe_device(file_priv->minor->dev);
long ret;
+ if (xe_device_wedged(xe))
+ return -ECANCELED;
+
ret = xe_pm_runtime_get_ioctl(xe);
if (ret >= 0)
ret = drm_ioctl(file, cmd, arg);
@@ -153,6 +156,9 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
struct xe_device *xe = to_xe_device(file_priv->minor->dev);
long ret;
+ if (xe_device_wedged(xe))
+ return -ECANCELED;
+
ret = xe_pm_runtime_get_ioctl(xe);
if (ret >= 0)
ret = drm_compat_ioctl(file, cmd, arg);
@@ -269,7 +275,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
init_waitqueue_head(&xe->ufence_wq);
- drmm_mutex_init(&xe->drm, &xe->usm.lock);
+ err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
+ if (err)
+ goto err;
+
xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
@@ -501,6 +510,8 @@ int xe_device_probe_early(struct xe_device *xe)
if (err)
return err;
+ xe->wedged.mode = xe_modparam.wedged_mode;
+
return 0;
}
@@ -759,3 +770,34 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}
+
+/**
+ * xe_device_declare_wedged - Declare device wedged
+ * @xe: xe device instance
+ *
+ * This is a final state that can only be cleared with a module
+ * re-probe (unbind + bind).
+ * In this state every IOCTL will be blocked so the GT cannot be used.
+ * In general it will be called upon any critical error such as gt reset
+ * failure or guc loading failure.
+ * If the xe.wedged_mode module parameter is set to 2, this function will be called
+ * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
+ * snapshot capture. In this mode, GT reset won't be attempted so the state of
+ * the issue is preserved for further debugging.
+ */
+void xe_device_declare_wedged(struct xe_device *xe)
+{
+ if (xe->wedged.mode == 0) {
+ drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
+ return;
+ }
+
+ if (!atomic_xchg(&xe->wedged.flag, 1)) {
+ xe->needs_flr_on_fini = true;
+ drm_err(&xe->drm,
+ "CRITICAL: Xe has declared device %s as wedged.\n"
+ "IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
+ "Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
+ dev_name(xe->drm.dev));
+ }
+}
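For quick reference, the wedged.mode values wired up across this series behave as summarized below (an editorial summary drawn from the hunks above and the new debugfs/modparam plumbing, not code from the patch):

/*
 * xe->wedged.mode (seeded from the xe.wedged_mode module parameter,
 * adjustable at runtime via the wedged_mode debugfs file):
 *   0 - never wedge: xe_device_declare_wedged() returns early
 *   1 - wedge only on critical errors (e.g. GT reset or GuC load failure)
 *   2 - wedge on every execution timeout, right after devcoredump capture,
 *       and skip the GT reset so the hang state is preserved
 */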
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 36d4434ebccc..82317580f4bf 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -167,4 +167,11 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);
+static inline bool xe_device_wedged(struct xe_device *xe)
+{
+ return atomic_read(&xe->wedged.flag);
+}
+
+void xe_device_declare_wedged(struct xe_device *xe);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 2e62450d86e1..0f68c55ea405 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -459,6 +459,14 @@ struct xe_device {
/** @needs_flr_on_fini: requests function-reset on fini */
bool needs_flr_on_fini;
+ /** @wedged: Struct to control Wedged States and mode */
+ struct {
+ /** @wedged.flag: Xe device faced a critical error and is now blocked. */
+ atomic_t flag;
+ /** @wedged.mode: Mode controlled by kernel parameter and debugfs */
+ int mode;
+ } wedged;
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 02ce8d204622..48f6da53a292 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -26,6 +26,15 @@ void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);
+static inline struct xe_exec_queue *
+xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
+{
+ if (kref_get_unless_zero(&q->refcount))
+ return q;
+
+ return NULL;
+}
+
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 491d0413de15..0528d599c3fe 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -477,6 +477,9 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
+ xe_gt_sriov_pf_init_hw(gt);
+
err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
XE_WARN_ON(err);
@@ -613,6 +616,9 @@ static int do_gt_restart(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
+ xe_gt_sriov_pf_init_hw(gt);
+
xe_mocs_init(gt);
err = xe_uc_start(&gt->uc);
if (err)
@@ -633,6 +639,9 @@ static int gt_reset(struct xe_gt *gt)
{
int err;
+ if (xe_device_wedged(gt_to_xe(gt)))
+ return -ECANCELED;
+
/* We only support GT resets with GuC submission */
if (!xe_device_uc_enabled(gt_to_xe(gt)))
return -ENODEV;
@@ -655,9 +664,7 @@ static int gt_reset(struct xe_gt *gt)
xe_uc_stop_prepare(&gt->uc);
xe_gt_pagefault_reset(gt);
- err = xe_uc_stop(&gt->uc);
- if (err)
- goto err_out;
+ xe_uc_stop(&gt->uc);
xe_gt_tlb_invalidation_reset(gt);
@@ -685,7 +692,7 @@ err_msg:
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
- gt_to_xe(gt)->needs_flr_on_fini = true;
+ xe_device_declare_wedged(gt_to_xe(gt));
return err;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 8cf0b2625efc..94f226a4438e 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -15,6 +15,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
+#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_topology.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
@@ -23,6 +24,7 @@
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
+#include "xe_sriov.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -290,4 +292,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
root, minor);
xe_uc_debugfs_register(&gt->uc, root);
+
+ if (IS_SRIOV_PF(xe))
+ xe_gt_sriov_pf_debugfs_register(gt, root);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index a7f4ab1aa584..e7d03e001a49 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -40,4 +40,28 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group,
for_each_dss((dss), (gt)) \
for_each_if((xe_gt_mcr_get_dss_steering((gt), (dss), &(group), &(instance)), true))
+/*
+ * Loop over each DSS available for geometry and determine the group and
+ * instance IDs that should be used to steer MCR accesses toward this DSS.
+ * @dss: DSS ID to obtain steering for
+ * @gt: GT structure
+ * @group: steering group ID, data type: u16
+ * @instance: steering instance ID, data type: u16
+ */
+#define for_each_geometry_dss(dss, gt, group, instance) \
+ for_each_dss_steering(dss, gt, group, instance) \
+ if (xe_gt_has_geometry_dss(gt, dss))
+
+/*
+ * Loop over each DSS available for compute and determine the group and
+ * instance IDs that should be used to steer MCR accesses toward this DSS.
+ * @dss: DSS ID to obtain steering for
+ * @gt: GT structure
+ * @group: steering group ID, data type: u16
+ * @instance: steering instance ID, data type: u16
+ */
+#define for_each_compute_dss(dss, gt, group, instance) \
+ for_each_dss_steering(dss, gt, group, instance) \
+ if (xe_gt_has_compute_dss(gt, dss))
+
#endif /* _XE_GT_MCR_H_ */
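A hedged usage sketch for the new per-DSS iterators (read_one_dss_instdone() is a hypothetical placeholder for whatever steered MCR access the caller performs; it is not a helper added by this series):

/* Illustrative only: visit every geometry-capable DSS with its steering IDs. */
static void sample_geometry_instdone(struct xe_gt *gt)
{
	unsigned int dss;
	u16 group, instance;

	for_each_geometry_dss(dss, gt, group, instance)
		read_one_dss_instdone(gt, dss, group, instance);	/* hypothetical */
}

for_each_compute_dss() is used the same way, only filtering on compute-capable DSS units instead.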
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index fa9e9853c53b..040dd142c49c 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -19,7 +19,6 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
-#include "xe_pt.h"
#include "xe_trace.h"
#include "xe_vm.h"
@@ -204,15 +203,14 @@ retry_userptr:
drm_exec_retry_on_contention(&exec);
if (ret)
goto unlock_dma_resv;
- }
- /* Bind VMA only to the GT that has faulted */
- trace_xe_vma_pf_bind(vma);
- fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
- vma->tile_present & BIT(tile->id));
- if (IS_ERR(fence)) {
- ret = PTR_ERR(fence);
- goto unlock_dma_resv;
+ /* Bind VMA only to the GT that has faulted */
+ trace_xe_vma_pf_bind(vma);
+ fence = xe_vma_rebind(vm, vma, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto unlock_dma_resv;
+ }
}
/*
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 791dcdd767e2..7decf71c2b7d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -5,8 +5,12 @@
#include <drm/drm_managed.h>
+#include "regs/xe_sriov_regs.h"
+
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_service.h"
+#include "xe_mmio.h"
/*
* VF's metadata is maintained in the flexible array where:
@@ -48,5 +52,33 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_gt_sriov_pf_service_init(gt);
+ if (err)
+ return err;
+
return 0;
}
+
+static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) == 1200;
+}
+
+static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
+{
+ xe_mmio_write32(gt, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
+}
+
+/**
+ * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
+ * @gt: the &xe_gt to initialize
+ *
+ * On some platforms the PF must explicitly enable VF's access to the GGTT.
+ */
+void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
+{
+ if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
+ pf_enable_ggtt_guest_update(gt);
+
+ xe_gt_sriov_pf_service_update(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index 05142ffc4319..37d7d6c3df03 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,11 +10,16 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
return 0;
}
+
+static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
+{
+}
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 79116ad58620..7eac01e04cc5 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -187,14 +187,20 @@ static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u3
return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}
-static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum)
+static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
- return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, exec_quantum);
+ /* GuC will silently clamp values exceeding max */
+ *exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);
+
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}
-static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 preempt_timeout)
+static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
- return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, preempt_timeout);
+ /* GuC will silently clamp values exceeding max */
+ *preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);
+
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}
static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1604,7 +1610,7 @@ static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
int err;
- err = pf_push_vf_cfg_exec_quantum(gt, vfid, exec_quantum);
+ err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
if (unlikely(err))
return err;
@@ -1674,7 +1680,7 @@ static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
int err;
- err = pf_push_vf_cfg_preempt_timeout(gt, vfid, preempt_timeout);
+ err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
if (unlikely(err))
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
new file mode 100644
index 000000000000..5102035faa7e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_bo.h"
+#include "xe_debugfs.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_debugfs.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_debugfs.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_policy.h"
+#include "xe_gt_sriov_pf_service.h"
+#include "xe_pm.h"
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0 # d_inode->i_private = gt
+ * │   ├── pf # d_inode->i_private = gt
+ * │   ├── vf1 # d_inode->i_private = VFID(1)
+ * :   :
+ * │   ├── vfN # d_inode->i_private = VFID(N)
+ */
+
+static void *extract_priv(struct dentry *d)
+{
+ return d->d_inode->i_private;
+}
+
+static struct xe_gt *extract_gt(struct dentry *d)
+{
+ return extract_priv(d->d_parent);
+}
+
+static unsigned int extract_vfid(struct dentry *d)
+{
+ return extract_priv(d) == extract_gt(d) ? PFID : (uintptr_t)extract_priv(d);
+}
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── ggtt_available
+ * │   │   ├── ggtt_provisioned
+ * │   │   ├── contexts_provisioned
+ * │   │   ├── doorbells_provisioned
+ * │   │   ├── runtime_registers
+ * │   │   ├── negotiated_versions
+ */
+
+static const struct drm_info_list pf_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_ggtt,
+ },
+ {
+ "contexts_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_ctxs,
+ },
+ {
+ "doorbells_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_dbs,
+ },
+ {
+ "runtime_registers",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_service_print_runtime,
+ },
+ {
+ "negotiated_versions",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_service_print_version,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── reset_engine
+ * │   │   ├── sample_period
+ * │   │   ├── sched_if_idle
+ */
+
+#define DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(POLICY, TYPE, FORMAT) \
+ \
+static int POLICY##_set(void *data, u64 val) \
+{ \
+ struct xe_gt *gt = extract_gt(data); \
+ struct xe_device *xe = gt_to_xe(gt); \
+ int err; \
+ \
+ if (val > (TYPE)~0ull) \
+ return -EOVERFLOW; \
+ \
+ xe_pm_runtime_get(xe); \
+ err = xe_gt_sriov_pf_policy_set_##POLICY(gt, val); \
+ xe_pm_runtime_put(xe); \
+ \
+ return err; \
+} \
+ \
+static int POLICY##_get(void *data, u64 *val) \
+{ \
+ struct xe_gt *gt = extract_gt(data); \
+ \
+ *val = xe_gt_sriov_pf_policy_get_##POLICY(gt); \
+ return 0; \
+} \
+ \
+DEFINE_DEBUGFS_ATTRIBUTE(POLICY##_fops, POLICY##_get, POLICY##_set, FORMAT)
+
+DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(reset_engine, bool, "%llu\n");
+DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(sched_if_idle, bool, "%llu\n");
+DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(sample_period, u32, "%llu\n");
+
+static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
+{
+ xe_gt_assert(gt, gt == extract_gt(parent));
+ xe_gt_assert(gt, PFID == extract_vfid(parent));
+
+ debugfs_create_file_unsafe("reset_engine", 0644, parent, parent, &reset_engine_fops);
+ debugfs_create_file_unsafe("sched_if_idle", 0644, parent, parent, &sched_if_idle_fops);
+ debugfs_create_file_unsafe("sample_period_ms", 0644, parent, parent, &sample_period_fops);
+}
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── ggtt_spare
+ * │   │   ├── lmem_spare
+ * │   │   ├── doorbells_spare
+ * │   │   ├── contexts_spare
+ * │   │   ├── exec_quantum_ms
+ * │   │   ├── preempt_timeout_us
+ * │   ├── vf1
+ * │   │   ├── ggtt_quota
+ * │   │   ├── lmem_quota
+ * │   │   ├── doorbells_quota
+ * │   │   ├── contexts_quota
+ * │   │   ├── exec_quantum_ms
+ * │   │   ├── preempt_timeout_us
+ */
+
+#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
+ \
+static int CONFIG##_set(void *data, u64 val) \
+{ \
+ struct xe_gt *gt = extract_gt(data); \
+ unsigned int vfid = extract_vfid(data); \
+ struct xe_device *xe = gt_to_xe(gt); \
+ int err; \
+ \
+ if (val > (TYPE)~0ull) \
+ return -EOVERFLOW; \
+ \
+ xe_pm_runtime_get(xe); \
+ err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ xe_pm_runtime_put(xe); \
+ \
+ return err; \
+} \
+ \
+static int CONFIG##_get(void *data, u64 *val) \
+{ \
+ struct xe_gt *gt = extract_gt(data); \
+ unsigned int vfid = extract_vfid(data); \
+ \
+ *val = xe_gt_sriov_pf_config_get_##CONFIG(gt, vfid); \
+ return 0; \
+} \
+ \
+DEFINE_DEBUGFS_ATTRIBUTE(CONFIG##_fops, CONFIG##_get, CONFIG##_set, FORMAT)
+
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, u64, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(lmem, u64, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
+
+static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigned int vfid)
+{
+ xe_gt_assert(gt, gt == extract_gt(parent));
+ xe_gt_assert(gt, vfid == extract_vfid(parent));
+
+ if (!xe_gt_is_media_type(gt)) {
+ debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
+ 0644, parent, parent, &ggtt_fops);
+ if (IS_DGFX(gt_to_xe(gt)))
+ debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
+ 0644, parent, parent, &lmem_fops);
+ }
+ debugfs_create_file_unsafe(vfid ? "doorbells_quota" : "doorbells_spare",
+ 0644, parent, parent, &dbs_fops);
+ debugfs_create_file_unsafe(vfid ? "contexts_quota" : "contexts_spare",
+ 0644, parent, parent, &ctxs_fops);
+ debugfs_create_file_unsafe("exec_quantum_ms", 0644, parent, parent,
+ &exec_quantum_fops);
+ debugfs_create_file_unsafe("preempt_timeout_us", 0644, parent, parent,
+ &preempt_timeout_fops);
+}
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── vf1
+ * │   │   ├── control { stop, pause, resume }
+ */
+
+static const struct {
+ const char *cmd;
+ int (*fn)(struct xe_gt *gt, unsigned int vfid);
+} control_cmds[] = {
+ { "stop", xe_gt_sriov_pf_control_stop_vf },
+ { "pause", xe_gt_sriov_pf_control_pause_vf },
+ { "resume", xe_gt_sriov_pf_control_resume_vf },
+};
+
+static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int vfid = extract_vfid(parent);
+ int ret = -EINVAL;
+ char cmd[32];
+ size_t n;
+
+ xe_gt_assert(gt, vfid);
+ xe_gt_sriov_pf_assert_vfid(gt, vfid);
+
+ if (*pos)
+ return -ESPIPE;
+
+ if (count > sizeof(cmd) - 1)
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(cmd, sizeof(cmd) - 1, pos, buf, count);
+ if (ret < 0)
+ return ret;
+ cmd[ret] = '\0';
+
+ for (n = 0; n < ARRAY_SIZE(control_cmds); n++) {
+ xe_gt_assert(gt, sizeof(cmd) > strlen(control_cmds[n].cmd));
+
+ if (sysfs_streq(cmd, control_cmds[n].cmd)) {
+ xe_pm_runtime_get(xe);
+ ret = control_cmds[n].fn ? (*control_cmds[n].fn)(gt, vfid) : 0;
+ xe_pm_runtime_put(xe);
+ break;
+ }
+ }
+
+ return (ret < 0) ? ret : count;
+}
+
+static ssize_t control_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ char help[128];
+ size_t n;
+
+ help[0] = '\0';
+ for (n = 0; n < ARRAY_SIZE(control_cmds); n++) {
+ strlcat(help, control_cmds[n].cmd, sizeof(help));
+ strlcat(help, "\n", sizeof(help));
+ }
+
+ return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
+}
+
+static const struct file_operations control_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = control_write,
+ .read = control_read,
+ .llseek = default_llseek,
+};
+
+/**
+ * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
+ * @gt: the &xe_gt to register
+ * @root: the &dentry that represents the GT directory
+ *
+ * Register SR-IOV PF entries that are GT related and must be shown under GT debugfs.
+ */
+void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct drm_minor *minor = xe->drm.primary;
+ int n, totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ struct dentry *pfdentry;
+ struct dentry *vfdentry;
+ char buf[14]; /* should be enough up to "vf%u\0" for 2^32 - 1 */
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ xe_gt_assert(gt, root->d_inode->i_private == gt);
+
+ /*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ */
+ pfdentry = debugfs_create_dir("pf", root);
+ if (IS_ERR(pfdentry))
+ return;
+ pfdentry->d_inode->i_private = gt;
+
+ drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
+ pf_add_policy_attrs(gt, pfdentry);
+ pf_add_config_attrs(gt, pfdentry, PFID);
+
+ for (n = 1; n <= totalvfs; n++) {
+ /*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── vf1
+ * │   ├── vf2
+ */
+ snprintf(buf, sizeof(buf), "vf%u", n);
+ vfdentry = debugfs_create_dir(buf, root);
+ if (IS_ERR(vfdentry))
+ break;
+ vfdentry->d_inode->i_private = (void *)(uintptr_t)n;
+
+ pf_add_config_attrs(gt, vfdentry, VFID(n));
+ debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops);
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
new file mode 100644
index 000000000000..038cc8ddc244
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_DEBUGFS_H_
+#define _XE_GT_SRIOV_PF_DEBUGFS_H_
+
+struct xe_gt;
+struct dentry;
+
+#ifdef CONFIG_PCI_IOV
+void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root);
+#else
+static inline void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
new file mode 100644
index 000000000000..0e23b7ea4f3e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "abi/guc_relay_actions_abi.h"
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_regs.h"
+
+#include "xe_mmio.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_service.h"
+#include "xe_gt_sriov_pf_service_types.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_hxg_helpers.h"
+
+static void pf_init_versions(struct xe_gt *gt)
+{
+ BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
+ BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
+
+ /* base versions may differ between platforms */
+ gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
+ gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
+
+ /* latest version is same for all platforms */
+ gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
+ gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
+}
+
+/* Return: 0 on success or a negative error code on failure. */
+static int pf_negotiate_version(struct xe_gt *gt,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
+ struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;
+
+ xe_gt_assert(gt, base.major);
+ xe_gt_assert(gt, base.major <= latest.major);
+ xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));
+
+ /* VF doesn't care - return our latest */
+ if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
+ wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants newer than our - return our latest */
+ if (wanted_major > latest.major) {
+ *major = latest.major;
+ *minor = latest.minor;
+ return 0;
+ }
+
+ /* VF wants older than min required - reject */
+ if (wanted_major < base.major ||
+ (wanted_major == base.major && wanted_minor < base.minor)) {
+ return -EPERM;
+ }
+
+ /* previous major - return wanted, as we should still support it */
+ if (wanted_major < latest.major) {
+ /* XXX: we are not prepared for multi-versions yet */
+ xe_gt_assert(gt, base.major == latest.major);
+ return -ENOPKG;
+ }
+
+ /* same major - return common minor */
+ *major = wanted_major;
+ *minor = min_t(u32, latest.minor, wanted_minor);
+ return 0;
+}
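To make the rules above concrete, an illustrative set of outcomes, assuming a PF whose base version is 1.0 and latest is 1.9 (numbers invented for the example, not the real ABI versions):

/*
 * wanted ANY.ANY -> 1.9     (VF doesn't care, return our latest)
 * wanted 2.5     -> 1.9     (newer major than ours, return our latest)
 * wanted 1.3     -> 1.3     (same major, VF's minor is the common one)
 * wanted 1.42    -> 1.9     (same major, minor clamped to our latest)
 * wanted 0.9     -> -EPERM  (older than the minimum base version)
 */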
+
+static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
+{
+ xe_gt_sriov_pf_assert_vfid(gt, vfid);
+ xe_gt_assert(gt, major || minor);
+
+ gt->sriov.pf.vfs[vfid].version.major = major;
+ gt->sriov.pf.vfs[vfid].version.minor = minor;
+}
+
+static void pf_disconnect(struct xe_gt *gt, u32 vfid)
+{
+ xe_gt_sriov_pf_assert_vfid(gt, vfid);
+
+ gt->sriov.pf.vfs[vfid].version.major = 0;
+ gt->sriov.pf.vfs[vfid].version.minor = 0;
+}
+
+static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
+{
+ xe_gt_sriov_pf_assert_vfid(gt, vfid);
+
+ return major == gt->sriov.pf.vfs[vfid].version.major &&
+ minor <= gt->sriov.pf.vfs[vfid].version.minor;
+}
+
+static const struct xe_reg tgl_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ CTC_MODE, /* _MMIO(0xa26c) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
+ TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg ats_m_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ MIRROR_FUSE1, /* _MMIO(0x911c) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ CTC_MODE, /* _MMIO(0xa26c) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
+ TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg pvc_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
+ CTC_MODE, /* _MMIO(0xA26C) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
+ TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg ver_1270_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ XEHP_FUSE4, /* _MMIO(0x9114) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ MIRROR_FUSE1, /* _MMIO(0x911c) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
+ CTC_MODE, /* _MMIO(0xa26c) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
+ TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg ver_2000_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ XEHP_FUSE4, /* _MMIO(0x9114) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ MIRROR_FUSE1, /* _MMIO(0x911c) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
+ XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
+ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
+ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
+ CTC_MODE, /* _MMIO(0xa26c) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
+ TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
+{
+ const struct xe_reg *regs;
+
+ if (GRAPHICS_VERx100(xe) >= 2000) {
+ *count = ARRAY_SIZE(ver_2000_runtime_regs);
+ regs = ver_2000_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) >= 1270) {
+ *count = ARRAY_SIZE(ver_1270_runtime_regs);
+ regs = ver_1270_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) == 1260) {
+ *count = ARRAY_SIZE(pvc_runtime_regs);
+ regs = pvc_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) == 1255) {
+ *count = ARRAY_SIZE(ats_m_runtime_regs);
+ regs = ats_m_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) == 1200) {
+ *count = ARRAY_SIZE(tgl_runtime_regs);
+ regs = tgl_runtime_regs;
+ } else {
+ regs = ERR_PTR(-ENOPKG);
+ *count = 0;
+ }
+
+ return regs;
+}
+
+static int pf_alloc_runtime_info(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ const struct xe_reg *regs;
+ unsigned int size;
+ u32 *values;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
+ xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
+ xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);
+
+ regs = pick_runtime_regs(xe, &size);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ if (unlikely(!size))
+ return 0;
+
+ values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ gt->sriov.pf.service.runtime.size = size;
+ gt->sriov.pf.service.runtime.regs = regs;
+ gt->sriov.pf.service.runtime.values = values;
+
+ return 0;
+}
+
+static void read_many(struct xe_gt *gt, unsigned int count,
+ const struct xe_reg *regs, u32 *values)
+{
+ while (count--)
+ *values++ = xe_mmio_read32(gt, *regs++);
+}
+
+static void pf_prepare_runtime_info(struct xe_gt *gt)
+{
+ const struct xe_reg *regs;
+ unsigned int size;
+ u32 *values;
+
+ if (!gt->sriov.pf.service.runtime.size)
+ return;
+
+ size = gt->sriov.pf.service.runtime.size;
+ regs = gt->sriov.pf.service.runtime.regs;
+ values = gt->sriov.pf.service.runtime.values;
+
+ read_many(gt, size, regs, values);
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ xe_gt_sriov_pf_service_print_runtime(gt, &p);
+ }
+}
+
+/**
+ * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
+ * @gt: the &xe_gt to initialize
+ *
+ * Performs early initialization of the GT SR-IOV PF services, including preparation
+ * of the runtime info that will be shared with VFs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
+{
+ int err;
+
+ pf_init_versions(gt);
+
+ err = pf_alloc_runtime_info(gt);
+ if (unlikely(err))
+ goto failed;
+
+ return 0;
+failed:
+ xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
+ * @gt: the &xe_gt to update
+ *
+ * Updates runtime data shared with VFs.
+ *
+ * This function can be called more than once.
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
+{
+ pf_prepare_runtime_info(gt);
+}
+
+/**
+ * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * Reset the VF/PF ABI version negotiated by the VF driver.
+ * After that point, the VF driver will have to perform a new version handshake
+ * to continue using the PF services.
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_disconnect(gt, vfid);
+}
+
+/* Return: 0 on success or a negative error code on failure. */
+static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
+ u32 wanted_major, u32 wanted_minor,
+ u32 *major, u32 *minor)
+{
+ int err;
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
+ vfid, wanted_major, wanted_minor);
+
+ err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);
+
+ if (err < 0) {
+ xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
+ vfid, wanted_major, wanted_minor, ERR_PTR(err));
+ pf_disconnect(gt, vfid);
+ } else {
+ xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
+ vfid, *major, *minor);
+ pf_connect(gt, vfid, *major, *minor);
+ }
+
+ return 0;
+}
+
+/* Return: length of the response message or a negative error code on failure. */
+static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
+ const u32 *request, u32 len, u32 *response, u32 size)
+{
+ u32 wanted_major, wanted_minor;
+ u32 major, minor;
+ u32 mbz;
+ int err;
+
+ if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
+ return -EMSGSIZE;
+
+ mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
+ if (unlikely(mbz))
+ return -EPFNOSUPPORT;
+
+ wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
+ wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);
+
+ err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
+ if (err < 0)
+ return err;
+
+ xe_gt_assert(gt, major || minor);
+ xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);
+
+ response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+ FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
+ response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
+ FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);
+
+ return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
+}
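For reference, a hedged sketch of the two-dword request this handler decodes, built from the same ABI macros it uses (the VF-side code is not part of this excerpt and the helper name is hypothetical):

static void vf_prep_handshake_request(u32 *request, u32 wanted_major, u32 wanted_minor)
{
	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_HANDSHAKE);
	request[1] = FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, wanted_major) |
		     FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, wanted_minor);
}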
+
+struct reg_data {
+ u32 offset;
+ u32 value;
+} __packed;
+static_assert(hxg_sizeof(struct reg_data) == 2);
+
+/* Return: number of entries copied or negative error code on failure. */
+static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
+ struct reg_data *data, u32 *remaining)
+{
+ struct xe_gt_sriov_pf_service_runtime_regs *runtime;
+ unsigned int count, i;
+ u32 addr;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ runtime = &gt->sriov.pf.service.runtime;
+
+ if (start > runtime->size)
+ return -ERANGE;
+
+ count = min_t(u32, runtime->size - start, limit);
+
+ for (i = 0; i < count; ++i, ++data) {
+ addr = runtime->regs[start + i].addr;
+ data->offset = xe_mmio_adjusted_addr(gt, addr);
+ data->value = runtime->values[start + i];
+ }
+
+ *remaining = runtime->size - start - count;
+ return count;
+}
+
+/* Return: length of the response message or a negative error code on failure. */
+static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
+ const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
+{
+ const u32 chunk_size = hxg_sizeof(struct reg_data);
+ struct reg_data *reg_data_buf;
+ u32 limit, start, max_chunks;
+ u32 remaining = 0;
+ int ret;
+
+ if (!pf_is_negotiated(gt, origin, 1, 0))
+ return -EACCES;
+ if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
+ return -EMSGSIZE;
+ if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
+ return -EPROTO;
+ if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
+ return -EINVAL;
+
+ limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
+ start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);
+
+ resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
+ max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
+ limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
+ reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);
+
+ ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
+ if (ret < 0)
+ return ret;
+
+ response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+ FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
+ response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);
+
+ return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
+}
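To illustrate the start/limit/remaining contract from the consumer side, a hedged sketch of how a VF could decode one response page (hypothetical helper, reusing the struct reg_data layout above; the relay send/receive plumbing is omitted):

static void vf_consume_runtime_page(const u32 *response, u32 start)
{
	u32 count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	u32 remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);
	const struct reg_data *data =
		(const void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);
	u32 i;

	for (i = 0; i < count; i++)
		pr_debug("runtime reg[%#x] = %#x\n", data[i].offset, data[i].value);

	if (remaining)
		pr_debug("%u entries pending, re-issue with START = %u\n",
			 remaining, start + count);
}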
+
+/**
+ * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
+ * @gt: the &xe_gt that provides the service
+ * @origin: VF number that is requesting the service
+ * @msg: request message
+ * @msg_len: length of the request message (in dwords)
+ * @response: placeholder for the response message
+ * @resp_size: length of the response message buffer (in dwords)
+ *
+ * This function processes a `Relay Message`_ request from the VF.
+ *
+ * Return: length of the response message or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
+ const u32 *msg, u32 msg_len,
+ u32 *response, u32 resp_size)
+{
+ u32 action, data __maybe_unused;
+ int ret;
+
+ xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
+ xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);
+
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+ data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
+ xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
+ action, data, origin);
+
+ switch (action) {
+ case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
+ ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
+ break;
+ case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
+ ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * This function is for PF use only.
+ */
+int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
+{
+ const struct xe_reg *regs;
+ unsigned int size;
+ u32 *values;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ size = gt->sriov.pf.service.runtime.size;
+ regs = gt->sriov.pf.service.runtime.regs;
+ values = gt->sriov.pf.service.runtime.values;
+
+ for (; size--; regs++, values++) {
+ drm_printf(p, "reg[%#x] = %#x\n",
+ xe_mmio_adjusted_addr(gt, regs->addr), *values);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * This function is for PF use only.
+ */
+int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+ struct xe_gt_sriov_pf_service_version *version;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ for (n = 1; n <= total_vfs; n++) {
+ version = &gt->sriov.pf.vfs[n].version;
+ if (!version->major && !version->minor)
+ continue;
+
+ drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
+ }
+
+ return 0;
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_gt_sriov_pf_service_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
new file mode 100644
index 000000000000..56aaadf0360d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_SERVICE_H_
+#define _XE_GT_SRIOV_PF_SERVICE_H_
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_gt;
+
+int xe_gt_sriov_pf_service_init(struct xe_gt *gt);
+void xe_gt_sriov_pf_service_update(struct xe_gt *gt);
+void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid);
+
+int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p);
+
+#ifdef CONFIG_PCI_IOV
+int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
+ const u32 *msg, u32 msg_len,
+ u32 *response, u32 resp_size);
+#else
+static inline int
+xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
+ const u32 *msg, u32 msg_len,
+ u32 *response, u32 resp_size)
+{
+ return -EPROTO;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service_types.h
new file mode 100644
index 000000000000..ad6dd75f0056
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service_types.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_SERVICE_TYPES_H_
+#define _XE_GT_SRIOV_PF_SERVICE_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_reg;
+
+/**
+ * struct xe_gt_sriov_pf_service_version - VF/PF ABI Version.
+ * @major: the major version of the VF/PF ABI
+ * @minor: the minor version of the VF/PF ABI
+ *
+ * See `GuC Relay Communication`_.
+ */
+struct xe_gt_sriov_pf_service_version {
+ u16 major;
+ u16 minor;
+};
+
+/**
+ * struct xe_gt_sriov_pf_service_runtime_regs - Runtime data shared with VFs.
+ * @regs: pointer to static array with register offsets.
+ * @values: pointer to array with captured register values.
+ * @size: size of the regs and value arrays.
+ */
+struct xe_gt_sriov_pf_service_runtime_regs {
+ const struct xe_reg *regs;
+ u32 *values;
+ u32 size;
+};
+
+/**
+ * struct xe_gt_sriov_pf_service - Data used by the PF service.
+ * @version: information about VF/PF ABI versions for current platform.
+ * @version.base: lowest VF/PF ABI version that could be negotiated with VF.
+ * @version.latest: latest VF/PF ABI version supported by the PF driver.
+ * @runtime: runtime data shared with VFs.
+ */
+struct xe_gt_sriov_pf_service {
+ struct {
+ struct xe_gt_sriov_pf_service_version base;
+ struct xe_gt_sriov_pf_service_version latest;
+ } version;
+ struct xe_gt_sriov_pf_service_runtime_regs runtime;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index faf9ee8266ce..880754f3e215 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -10,6 +10,7 @@
#include "xe_gt_sriov_pf_config_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
+#include "xe_gt_sriov_pf_service_types.h"
/**
* struct xe_gt_sriov_metadata - GT level per-VF metadata.
@@ -17,15 +18,19 @@
struct xe_gt_sriov_metadata {
/** @config: per-VF provisioning data. */
struct xe_gt_sriov_config config;
+ /** @version: negotiated VF/PF ABI version */
+ struct xe_gt_sriov_pf_service_version version;
};
/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
+ * @service: service data.
* @policy: policy data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
+ struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_policy policy;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 93df2d7969b3..d0ee1e0df0bd 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -245,7 +245,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return seqno;
xe_gt_tlb_invalidation_wait(gt, seqno);
- } else if (xe_device_uc_enabled(xe)) {
+ } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
@@ -263,11 +263,15 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
}
/**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
+ * address range
+ *
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
* completion, can be NULL
- * @vma: VMA to invalidate
+ * @start: start address
+ * @end: end address
+ * @asid: address space id
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
* TLB invalidation. Completion of TLB is asynchronous and caller can either use
@@ -277,17 +281,15 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
* Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
* negative error code on error.
*/
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma)
+int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ u64 start, u64 end, u32 asid)
{
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0;
- xe_gt_assert(gt, vma);
-
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist) {
if (fence)
@@ -301,8 +303,8 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
if (!xe->info.has_range_tlb_invalidation) {
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
- u64 start = xe_vma_start(vma);
- u64 length = xe_vma_size(vma);
+ u64 orig_start = start;
+ u64 length = end - start;
u64 align, end;
if (length < SZ_4K)
@@ -315,12 +317,12 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
* address mask covering the required range.
*/
align = roundup_pow_of_two(length);
- start = ALIGN_DOWN(xe_vma_start(vma), align);
- end = ALIGN(xe_vma_end(vma), align);
+ start = ALIGN_DOWN(start, align);
+ end = ALIGN(end, align);
length = align;
while (start + length < end) {
length <<= 1;
- start = ALIGN_DOWN(xe_vma_start(vma), length);
+ start = ALIGN_DOWN(orig_start, length);
}
/*
@@ -329,16 +331,17 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
*/
if (length >= SZ_2M) {
length = max_t(u64, SZ_16M, length);
- start = ALIGN_DOWN(xe_vma_start(vma), length);
+ start = ALIGN_DOWN(orig_start, length);
}
xe_gt_assert(gt, length >= SZ_4K);
xe_gt_assert(gt, is_power_of_2(length));
- xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)));
+ xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
+ ilog2(SZ_2M) + 1)));
xe_gt_assert(gt, IS_ALIGNED(start, length));
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
- action[len++] = xe_vma_vm(vma)->usm.asid;
+ action[len++] = asid;
action[len++] = lower_32_bits(start);
action[len++] = upper_32_bits(start);
action[len++] = ilog2(length) - ilog2(SZ_4K);
@@ -350,6 +353,32 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
}
/**
+ * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * @gt: graphics tile
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion, can be NULL
+ * @vma: VMA to invalidate
+ *
+ * Issue a range-based TLB invalidation if supported; if not, fall back to a
+ * full TLB invalidation. Completion is asynchronous and the caller can either
+ * use the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait
+ * for completion.
+ *
+ * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
+ * negative error code on error.
+ */
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma)
+{
+ xe_gt_assert(gt, vma);
+
+ return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
+ xe_vma_end(vma),
+ xe_vma_vm(vma)->usm.asid);
+}
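A short worked example of the range rounding in xe_gt_tlb_invalidation_range() above, with values chosen purely for illustration:

/*
 * start = 0x3000, end = 0x6000:
 *   length = 0x3000 -> roundup_pow_of_two() -> align = 0x4000
 *   start  = ALIGN_DOWN(0x3000, 0x4000) = 0x0000
 *   end    = ALIGN(0x6000, 0x4000)      = 0x8000
 *   0x0000 + 0x4000 < 0x8000 -> length doubles to 0x8000, start stays 0x0000
 * Result: a single power-of-two range [0x0000, 0x8000) covering the request,
 * encoded in the GuC action as ilog2(0x8000) - ilog2(SZ_4K) = 3.
 */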
+
+/**
* xe_gt_tlb_invalidation_wait - Wait for TLB to complete
* @gt: graphics tile
* @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index fbb743d80d2c..bf3bebd9f985 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -20,6 +20,9 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
+int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ u64 start, u64 end, u32 asid);
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 3733e7a6860d..af841d801a8f 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -278,3 +278,13 @@ bool xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad)
return quad_first < (quad + 1) * dss_per_quad;
}
+
+bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss)
+{
+ return test_bit(dss, gt->fuse_topo.g_dss_mask);
+}
+
+bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss)
+{
+ return test_bit(dss, gt->fuse_topo.c_dss_mask);
+}
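A minimal usage sketch for these helpers (the loop below is illustrative, not the actual caller; XE_MAX_DSS_FUSE_BITS is assumed to bound the fuse masks, as it does elsewhere in this series):

/* Illustrative only: visit every non-fused geometry DSS on this GT. */
static void walk_geometry_dss(struct xe_gt *gt)
{
	unsigned int dss;

	for (dss = 0; dss < XE_MAX_DSS_FUSE_BITS; dss++) {
		if (!xe_gt_has_geometry_dss(gt, dss))
			continue;
		/* per-DSS work, e.g. steered MCR reads, goes here */
	}
}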
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index b3e357777a6e..746b325bbf6e 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -33,4 +33,7 @@ bool xe_dss_mask_empty(const xe_dss_mask_t mask);
bool
xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
+bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss);
+bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss);
+
#endif /* _XE_GT_TOPOLOGY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 240e7a4bbff1..0c9938e0ab8c 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -451,7 +451,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
return 0;
}
-static int guc_wait_ucode(struct xe_guc *guc)
+static void guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u32 status;
@@ -479,30 +479,26 @@ static int guc_wait_ucode(struct xe_guc *guc)
200000, &status, false);
if (ret) {
- xe_gt_info(gt, "GuC load failed: status = 0x%08X\n", status);
- xe_gt_info(gt, "GuC status: Reset = %u, BootROM = %#X, UKernel = %#X, MIA = %#X, Auth = %#X\n",
- REG_FIELD_GET(GS_MIA_IN_RESET, status),
- REG_FIELD_GET(GS_BOOTROM_MASK, status),
- REG_FIELD_GET(GS_UKERNEL_MASK, status),
- REG_FIELD_GET(GS_MIA_MASK, status),
- REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
-
- if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- xe_gt_info(gt, "GuC firmware signature verification failed\n");
- ret = -ENOEXEC;
- }
+ xe_gt_err(gt, "GuC load failed: status = 0x%08X\n", status);
+ xe_gt_err(gt, "GuC status: Reset = %u, BootROM = %#X, UKernel = %#X, MIA = %#X, Auth = %#X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ REG_FIELD_GET(GS_BOOTROM_MASK, status),
+ REG_FIELD_GET(GS_UKERNEL_MASK, status),
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
+
+ if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED)
+ xe_gt_err(gt, "GuC firmware signature verification failed\n");
if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
- XE_GUC_LOAD_STATUS_EXCEPTION) {
- xe_gt_info(gt, "GuC firmware exception. EIP: %#x\n",
- xe_mmio_read32(gt, SOFT_SCRATCH(13)));
- ret = -ENXIO;
- }
+ XE_GUC_LOAD_STATUS_EXCEPTION)
+ xe_gt_err(gt, "GuC firmware exception. EIP: %#x\n",
+ xe_mmio_read32(gt, SOFT_SCRATCH(13)));
+
+ xe_device_declare_wedged(gt_to_xe(gt));
} else {
xe_gt_dbg(gt, "GuC successfully loaded\n");
}
-
- return ret;
}
static int __xe_guc_upload(struct xe_guc *guc)
@@ -532,9 +528,7 @@ static int __xe_guc_upload(struct xe_guc *guc)
goto out;
/* Wait for authentication */
- ret = guc_wait_ucode(guc);
- if (ret)
- goto out;
+ guc_wait_ucode(guc);
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
return 0;
@@ -891,17 +885,11 @@ void xe_guc_stop_prepare(struct xe_guc *guc)
XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
}
-int xe_guc_stop(struct xe_guc *guc)
+void xe_guc_stop(struct xe_guc *guc)
{
- int ret;
-
xe_guc_ct_stop(&guc->ct);
- ret = xe_guc_submit_stop(guc);
- if (ret)
- return ret;
-
- return 0;
+ xe_guc_submit_stop(guc);
}
int xe_guc_start(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 94f2dc5f6f90..a3c92b74a3d5 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -35,7 +35,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p);
int xe_guc_reset_prepare(struct xe_guc *guc);
void xe_guc_reset_wait(struct xe_guc *guc);
void xe_guc_stop_prepare(struct xe_guc *guc);
-int xe_guc_stop(struct xe_guc *guc);
+void xe_guc_stop(struct xe_guc *guc);
int xe_guc_start(struct xe_guc *guc);
bool xe_guc_in_reset(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 1aafa486edec..6a5eb21748b1 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -9,6 +9,7 @@
#include <generated/xe_wa_oob.h>
+#include "abi/guc_actions_abi.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
@@ -16,6 +17,7 @@
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_guc.h"
+#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_map.h"
@@ -440,11 +442,18 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
static void guc_policies_init(struct xe_guc_ads *ads)
{
+ struct xe_device *xe = ads_to_xe(ads);
+ u32 global_flags = 0;
+
ads_blob_write(ads, policies.dpc_promote_time,
GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
ads_blob_write(ads, policies.max_num_work_items,
GLOBAL_POLICY_MAX_NUM_WI);
- ads_blob_write(ads, policies.global_flags, 0);
+
+ if (xe->wedged.mode == 2)
+ global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+
+ ads_blob_write(ads, policies.global_flags, global_flags);
ads_blob_write(ads, policies.is_valid, 1);
}
@@ -799,3 +808,57 @@ void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
guc_populate_golden_lrc(ads);
}
+
+static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
+{
+ struct xe_guc_ct *ct = &ads_to_guc(ads)->ct;
+ u32 action[] = {
+ XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
+ policy_offset
+ };
+
+ return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
+}
+
+/**
+ * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
+ * @ads: Additional data structures object
+ *
+ * This function updates the GuC's engine reset policy based on wedged.mode.
+ *
+ * Return: 0 on success, and negative error code otherwise.
+ */
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct guc_policies *policies;
+ struct xe_bo *bo;
+ int ret = 0;
+
+ policies = kmalloc(sizeof(*policies), GFP_KERNEL);
+ if (!policies)
+ return -ENOMEM;
+
+ policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
+ policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
+ policies->is_valid = 1;
+ policies->global_flags = ads_blob_read(ads, policies.global_flags);
+ if (xe->wedged.mode == 2)
+ policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+ else
+ policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+
+ bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ goto out;
+ }
+
+ ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
+out:
+ kfree(policies);
+ return ret;
+}
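For context, a hedged sketch of how a wedged-mode knob could apply this policy change across all GTs at runtime (the real call site is elsewhere in this series and not shown here; the helper name is hypothetical):

static int wedged_mode_apply(struct xe_device *xe, u32 mode)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	xe->wedged.mode = mode;
	for_each_gt(gt, xe, id) {
		err = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
		if (err)
			break;
	}
	return err;
}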
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.h b/drivers/gpu/drm/xe/xe_guc_ads.h
index 138ef6267671..2e2531779122 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads.h
@@ -13,5 +13,6 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads);
void xe_guc_ads_populate(struct xe_guc_ads *ads);
void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads);
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads);
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.c b/drivers/gpu/drm/xe/xe_guc_db_mgr.c
index 8d9a0287df6b..6767e8076e6b 100644
--- a/drivers/gpu/drm/xe/xe_guc_db_mgr.c
+++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.c
@@ -106,7 +106,8 @@ int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count)
if (ret)
return ret;
done:
- xe_gt_dbg(dbm_to_gt(dbm), "using %u doorbell(s)\n", dbm->count);
+ xe_gt_dbg(dbm_to_gt(dbm), "using %u doorbell%s\n",
+ dbm->count, str_plural(dbm->count));
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
index 0fb7c6b78c31..cd0549d0ef89 100644
--- a/drivers/gpu/drm/xe/xe_guc_id_mgr.c
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -97,7 +97,8 @@ int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
if (ret)
return ret;
- xe_gt_info(idm_to_gt(idm), "using %u GUC ID(s)\n", idm->total);
+ xe_gt_info(idm_to_gt(idm), "using %u GUC ID%s\n",
+ idm->total, str_plural(idm->total));
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 509649d0e65e..8fc757900ed1 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -902,6 +902,9 @@ static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
return;
}
+ if (xe_device_wedged(xe))
+ return;
+
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
XE_WARN_ON(xe_guc_pc_stop(pc));
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index c0a2d8d5d3b3..c3bbaf474f9a 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -19,6 +19,7 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_gt_sriov_pf_service.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
@@ -664,6 +665,7 @@ static int relay_testloop_action_handler(struct xe_guc_relay *relay, u32 origin,
static int relay_action_handler(struct xe_guc_relay *relay, u32 origin,
const u32 *msg, u32 len, u32 *response, u32 size)
{
+ struct xe_gt *gt = relay_to_gt(relay);
u32 type;
int ret;
@@ -674,8 +676,10 @@ static int relay_action_handler(struct xe_guc_relay *relay, u32 origin,
type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
- /* XXX: PF services will be added later */
- ret = -EOPNOTSUPP;
+ if (IS_SRIOV_PF(relay_to_xe(relay)))
+ ret = xe_gt_sriov_pf_service_process_request(gt, origin, msg, len, response, size);
+ else
+ ret = -EOPNOTSUPP;
if (type == GUC_HXG_TYPE_EVENT)
relay_assert(relay, ret <= 0);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index c7d38469fb46..d274a139010b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -52,13 +52,14 @@ exec_queue_to_guc(struct xe_exec_queue *q)
* engine done being processed).
*/
#define EXEC_QUEUE_STATE_REGISTERED (1 << 0)
-#define ENGINE_STATE_ENABLED (1 << 1)
-#define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
+#define EXEC_QUEUE_STATE_ENABLED (1 << 1)
+#define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
#define EXEC_QUEUE_STATE_PENDING_DISABLE (1 << 3)
#define EXEC_QUEUE_STATE_DESTROYED (1 << 4)
-#define ENGINE_STATE_SUSPENDED (1 << 5)
-#define EXEC_QUEUE_STATE_RESET (1 << 6)
-#define ENGINE_STATE_KILLED (1 << 7)
+#define EXEC_QUEUE_STATE_SUSPENDED (1 << 5)
+#define EXEC_QUEUE_STATE_RESET (1 << 6)
+#define EXEC_QUEUE_STATE_KILLED (1 << 7)
+#define EXEC_QUEUE_STATE_WEDGED (1 << 8)
static bool exec_queue_registered(struct xe_exec_queue *q)
{
@@ -77,17 +78,17 @@ static void clear_exec_queue_registered(struct xe_exec_queue *q)
static bool exec_queue_enabled(struct xe_exec_queue *q)
{
- return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED;
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
}
static void set_exec_queue_enabled(struct xe_exec_queue *q)
{
- atomic_or(ENGINE_STATE_ENABLED, &q->guc->state);
+ atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
}
static void clear_exec_queue_enabled(struct xe_exec_queue *q)
{
- atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state);
+ atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
}
static bool exec_queue_pending_enable(struct xe_exec_queue *q)
@@ -142,17 +143,17 @@ static void set_exec_queue_banned(struct xe_exec_queue *q)
static bool exec_queue_suspended(struct xe_exec_queue *q)
{
- return atomic_read(&q->guc->state) & ENGINE_STATE_SUSPENDED;
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
}
static void set_exec_queue_suspended(struct xe_exec_queue *q)
{
- atomic_or(ENGINE_STATE_SUSPENDED, &q->guc->state);
+ atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
}
static void clear_exec_queue_suspended(struct xe_exec_queue *q)
{
- atomic_and(~ENGINE_STATE_SUSPENDED, &q->guc->state);
+ atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
}
static bool exec_queue_reset(struct xe_exec_queue *q)
@@ -167,17 +168,28 @@ static void set_exec_queue_reset(struct xe_exec_queue *q)
static bool exec_queue_killed(struct xe_exec_queue *q)
{
- return atomic_read(&q->guc->state) & ENGINE_STATE_KILLED;
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
}
static void set_exec_queue_killed(struct xe_exec_queue *q)
{
- atomic_or(ENGINE_STATE_KILLED, &q->guc->state);
+ atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
}
-static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
+static bool exec_queue_wedged(struct xe_exec_queue *q)
{
- return exec_queue_killed(q) || exec_queue_banned(q);
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
+}
+
+static void set_exec_queue_wedged(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
+}
+
+static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
+{
+ return exec_queue_banned(q) || (atomic_read(&q->guc->state) &
+ (EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED));
}
#ifdef CONFIG_PROVE_LOCKING
@@ -240,6 +252,17 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
free_submit_wq(guc);
}
+static void guc_submit_wedged_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_guc *guc = arg;
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ if (exec_queue_wedged(q))
+ xe_exec_queue_put(q);
+}
+
static const struct xe_exec_queue_ops guc_exec_queue_ops;
static void primelockdep(struct xe_guc *guc)
@@ -250,7 +273,6 @@ static void primelockdep(struct xe_guc *guc)
fs_reclaim_acquire(GFP_KERNEL);
mutex_lock(&guc->submission_state.lock);
- might_lock(&guc->submission_state.suspend.lock);
mutex_unlock(&guc->submission_state.lock);
fs_reclaim_release(GFP_KERNEL);
@@ -278,9 +300,6 @@ int xe_guc_submit_init(struct xe_guc *guc)
xa_init(&guc->submission_state.exec_queue_lookup);
- spin_lock_init(&guc->submission_state.suspend.lock);
- guc->submission_state.suspend.context = dma_fence_context_alloc(1);
-
primelockdep(guc);
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
@@ -430,9 +449,9 @@ static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue
xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
field_, val_)
-static void __register_mlrc_engine(struct xe_guc *guc,
- struct xe_exec_queue *q,
- struct guc_ctxt_registration_info *info)
+static void __register_mlrc_exec_queue(struct xe_guc *guc,
+ struct xe_exec_queue *q,
+ struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
struct xe_device *xe = guc_to_xe(guc);
@@ -469,8 +488,8 @@ static void __register_mlrc_engine(struct xe_guc *guc,
xe_guc_ct_send(&guc->ct, action, len, 0, 0);
}
-static void __register_engine(struct xe_guc *guc,
- struct guc_ctxt_registration_info *info)
+static void __register_exec_queue(struct xe_guc *guc,
+ struct guc_ctxt_registration_info *info)
{
u32 action[] = {
XE_GUC_ACTION_REGISTER_CONTEXT,
@@ -490,7 +509,7 @@ static void __register_engine(struct xe_guc *guc,
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void register_engine(struct xe_exec_queue *q)
+static void register_exec_queue(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
@@ -538,9 +557,9 @@ static void register_engine(struct xe_exec_queue *q)
set_exec_queue_registered(q);
trace_xe_exec_queue_register(q);
if (xe_exec_queue_is_parallel(q))
- __register_mlrc_engine(guc, q, &info);
+ __register_mlrc_exec_queue(guc, q, &info);
else
- __register_engine(guc, &info);
+ __register_exec_queue(guc, &info);
init_policies(guc, q);
}
@@ -708,9 +727,9 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
trace_xe_sched_job_run(job);
- if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) {
+ if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
- register_engine(q);
+ register_exec_queue(q);
if (!lr) /* LR jobs are emitted in the exec IOCTL */
q->ring_ops->emit_job(job);
submit_exec_queue(q);
@@ -844,6 +863,40 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
xe_sched_tdr_queue_imm(&q->guc->sched);
}
+static bool guc_submit_hint_wedged(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ unsigned long index;
+ int err;
+
+ if (xe->wedged.mode != 2)
+ return false;
+
+ if (xe_device_wedged(xe))
+ return true;
+
+ xe_device_declare_wedged(xe);
+
+ xe_guc_submit_reset_prepare(guc);
+ xe_guc_ct_stop(&guc->ct);
+
+ err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm,
+ guc_submit_wedged_fini, guc);
+ if (err) {
+ drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2; device is wedged anyway\n");
+ return true; /* Device is wedged anyway */
+ }
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ if (xe_exec_queue_get_unless_zero(q))
+ set_exec_queue_wedged(q);
+ mutex_unlock(&guc->submission_state.lock);
+
+ return true;
+}
+
static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
@@ -852,10 +905,13 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
struct xe_gpu_scheduler *sched = &ge->sched;
+ bool wedged;
xe_assert(xe, xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q);
+ wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
+
/* Kill the run_job / process_msg entry points */
xe_sched_submission_stop(sched);
@@ -870,7 +926,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
* xe_guc_deregister_done_handler() which treats it as an unexpected
* state.
*/
- if (exec_queue_registered(q) && !exec_queue_destroyed(q)) {
+ if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
struct xe_guc *guc = exec_queue_to_guc(q);
int ret;
@@ -905,6 +961,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
struct xe_device *xe = guc_to_xe(exec_queue_to_guc(q));
int err = -ETIME;
int i = 0;
+ bool wedged;
/*
* TDR has fired before free job worker. Common if exec queue
@@ -928,6 +985,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
trace_xe_sched_job_timedout(job);
+ wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
+
/* Kill the run_job entry point */
xe_sched_submission_stop(sched);
@@ -935,8 +994,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* Kernel jobs should never fail, nor should VM jobs; if they do,
* something has gone wrong and the GT needs a reset
*/
- if (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
- (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))) {
+ if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
+ (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
if (!xe_sched_invalidate_job(job, 2)) {
xe_sched_add_pending_job(sched, job);
xe_sched_submission_start(sched);
@@ -946,7 +1005,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
}
/* Engine state now stable, disable scheduling if needed */
- if (exec_queue_registered(q)) {
+ if (!wedged && exec_queue_registered(q)) {
struct xe_guc *guc = exec_queue_to_guc(q);
int ret;
@@ -989,6 +1048,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
*/
xe_sched_add_pending_job(sched, job);
xe_sched_submission_start(sched);
+
xe_guc_exec_queue_trigger_cleanup(q);
/* Mark all outstanding jobs as bad, thus completing them */
@@ -1028,7 +1088,7 @@ static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
/* We must block on kernel engines so slabs are empty on driver unload */
- if (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
+ if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
__guc_exec_queue_fini_async(&q->guc->fini_async);
else
queue_work(system_wq, &q->guc->fini_async);
@@ -1063,7 +1123,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
{
- return !exec_queue_killed_or_banned(q) && exec_queue_registered(q);
+ return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
}
static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
@@ -1274,7 +1334,7 @@ static void guc_exec_queue_fini(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
- if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT))
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
__guc_exec_queue_fini(exec_queue_to_guc(q), q);
@@ -1285,7 +1345,8 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
{
struct xe_sched_msg *msg;
- if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
+ if (q->sched_props.priority == priority ||
+ exec_queue_killed_or_banned_or_wedged(q))
return 0;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -1303,7 +1364,7 @@ static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_u
struct xe_sched_msg *msg;
if (q->sched_props.timeslice_us == timeslice_us ||
- exec_queue_killed_or_banned(q))
+ exec_queue_killed_or_banned_or_wedged(q))
return 0;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -1322,7 +1383,7 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
struct xe_sched_msg *msg;
if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
- exec_queue_killed_or_banned(q))
+ exec_queue_killed_or_banned_or_wedged(q))
return 0;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -1339,7 +1400,7 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
- if (exec_queue_killed_or_banned(q) || q->guc->suspend_pending)
+ if (exec_queue_killed_or_banned_or_wedged(q) || q->guc->suspend_pending)
return -EINVAL;
q->guc->suspend_pending = true;
@@ -1410,7 +1471,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
set_exec_queue_suspended(q);
suspend_fence_signal(q);
}
- atomic_and(EXEC_QUEUE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
+ atomic_and(EXEC_QUEUE_STATE_DESTROYED | EXEC_QUEUE_STATE_SUSPENDED,
&q->guc->state);
q->guc->resume_time = 0;
trace_xe_exec_queue_stop(q);
@@ -1458,7 +1519,7 @@ void xe_guc_submit_reset_wait(struct xe_guc *guc)
wait_event(guc->ct.wq, !guc_read_stopped(guc));
}
-int xe_guc_submit_stop(struct xe_guc *guc)
+void xe_guc_submit_stop(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
@@ -1478,14 +1539,13 @@ int xe_guc_submit_stop(struct xe_guc *guc)
* creation which is protected by guc->submission_state.lock.
*/
- return 0;
}
static void guc_exec_queue_start(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
- if (!exec_queue_killed_or_banned(q)) {
+ if (!exec_queue_killed_or_banned_or_wedged(q)) {
int i;
trace_xe_exec_queue_resubmit(q);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index fad0421ead36..4275b7da9df5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -16,7 +16,7 @@ int xe_guc_submit_init(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
-int xe_guc_submit_stop(struct xe_guc *guc);
+void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 82bd93f7867d..546ac6350a31 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -72,15 +72,6 @@ struct xe_guc {
atomic_t stopped;
/** @submission_state.lock: protects submission state */
struct mutex lock;
- /** @submission_state.suspend: suspend fence state */
- struct {
- /** @submission_state.suspend.lock: suspend fences lock */
- spinlock_t lock;
- /** @submission_state.suspend.context: suspend fences context */
- u64 context;
- /** @submission_state.suspend.seqno: suspend fences seqno */
- u32 seqno;
- } suspend;
#ifdef CONFIG_PROVE_LOCKING
#define NUM_SUBMIT_WQ 256
/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 455f375c1cbd..4cc757457e01 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -18,6 +18,7 @@
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
@@ -766,6 +767,57 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
xe_hw_fence_irq_run(hwe->fence_irq);
}
+static bool
+is_slice_common_per_gslice(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) >= 1255;
+}
+
+static void
+xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
+ struct xe_hw_engine_snapshot *snapshot)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int dss;
+ u16 group, instance;
+
+ snapshot->reg.instdone.ring = hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
+
+ if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
+ return;
+
+ if (!is_slice_common_per_gslice(xe)) {
+ snapshot->reg.instdone.slice_common[0] =
+ xe_mmio_read32(gt, SC_INSTDONE);
+ snapshot->reg.instdone.slice_common_extra[0] =
+ xe_mmio_read32(gt, SC_INSTDONE_EXTRA);
+ snapshot->reg.instdone.slice_common_extra2[0] =
+ xe_mmio_read32(gt, SC_INSTDONE_EXTRA2);
+ } else {
+ for_each_geometry_dss(dss, gt, group, instance) {
+ snapshot->reg.instdone.slice_common[dss] =
+ xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE, group, instance);
+ snapshot->reg.instdone.slice_common_extra[dss] =
+ xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA, group, instance);
+ snapshot->reg.instdone.slice_common_extra2[dss] =
+ xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA2, group, instance);
+ }
+ }
+
+ for_each_geometry_dss(dss, gt, group, instance) {
+ snapshot->reg.instdone.sampler[dss] =
+ xe_gt_mcr_unicast_read(gt, SAMPLER_INSTDONE, group, instance);
+ snapshot->reg.instdone.row[dss] =
+ xe_gt_mcr_unicast_read(gt, ROW_INSTDONE, group, instance);
+
+ if (GRAPHICS_VERx100(xe) >= 1255)
+ snapshot->reg.instdone.geom_svg[dss] =
+ xe_gt_mcr_unicast_read(gt, XEHPG_INSTDONE_GEOM_SVGUNIT,
+ group, instance);
+ }
+}
+
/**
* xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
* @hwe: Xe HW Engine.
@@ -780,6 +832,7 @@ struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
{
struct xe_hw_engine_snapshot *snapshot;
+ size_t len;
u64 val;
if (!xe_hw_engine_is_valid(hwe))
@@ -790,8 +843,30 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
if (!snapshot)
return NULL;
+ /* Because XE_MAX_DSS_FUSE_BITS is defined in xe_gt_types.h, which in turn
+ * includes xe_hw_engine_types.h, the length of these per-DSS arrays can't
+ * be fixed in struct xe_hw_engine_snapshot, so allocate them here instead.
+ */
+ len = (XE_MAX_DSS_FUSE_BITS * sizeof(u32));
+ snapshot->reg.instdone.slice_common = kzalloc(len, GFP_ATOMIC);
+ snapshot->reg.instdone.slice_common_extra = kzalloc(len, GFP_ATOMIC);
+ snapshot->reg.instdone.slice_common_extra2 = kzalloc(len, GFP_ATOMIC);
+ snapshot->reg.instdone.sampler = kzalloc(len, GFP_ATOMIC);
+ snapshot->reg.instdone.row = kzalloc(len, GFP_ATOMIC);
+ snapshot->reg.instdone.geom_svg = kzalloc(len, GFP_ATOMIC);
+ if (!snapshot->reg.instdone.slice_common ||
+ !snapshot->reg.instdone.slice_common_extra ||
+ !snapshot->reg.instdone.slice_common_extra2 ||
+ !snapshot->reg.instdone.sampler ||
+ !snapshot->reg.instdone.row ||
+ !snapshot->reg.instdone.geom_svg) {
+ xe_hw_engine_snapshot_free(snapshot);
+ return NULL;
+ }
+
snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
- snapshot->class = hwe->class;
+ snapshot->hwe = hwe;
snapshot->logical_instance = hwe->logical_instance;
snapshot->forcewake.domain = hwe->domain;
snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
@@ -841,13 +916,57 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));
+ xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
- if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
+ if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);
return snapshot;
}
+static void
+xe_hw_engine_snapshot_instdone_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
+{
+ struct xe_gt *gt = snapshot->hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ u16 group, instance;
+ unsigned int dss;
+
+ drm_printf(p, "\tRING_INSTDONE: 0x%08x\n", snapshot->reg.instdone.ring);
+
+ if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
+ return;
+
+ if (!is_slice_common_per_gslice(xe)) {
+ drm_printf(p, "\tSC_INSTDONE[0]: 0x%08x\n",
+ snapshot->reg.instdone.slice_common[0]);
+ drm_printf(p, "\tSC_INSTDONE_EXTRA[0]: 0x%08x\n",
+ snapshot->reg.instdone.slice_common_extra[0]);
+ drm_printf(p, "\tSC_INSTDONE_EXTRA2[0]: 0x%08x\n",
+ snapshot->reg.instdone.slice_common_extra2[0]);
+ } else {
+ for_each_geometry_dss(dss, gt, group, instance) {
+ drm_printf(p, "\tSC_INSTDONE[%u]: 0x%08x\n", dss,
+ snapshot->reg.instdone.slice_common[dss]);
+ drm_printf(p, "\tSC_INSTDONE_EXTRA[%u]: 0x%08x\n", dss,
+ snapshot->reg.instdone.slice_common_extra[dss]);
+ drm_printf(p, "\tSC_INSTDONE_EXTRA2[%u]: 0x%08x\n", dss,
+ snapshot->reg.instdone.slice_common_extra2[dss]);
+ }
+ }
+
+ for_each_geometry_dss(dss, gt, group, instance) {
+ drm_printf(p, "\tSAMPLER_INSTDONE[%u]: 0x%08x\n", dss,
+ snapshot->reg.instdone.sampler[dss]);
+ drm_printf(p, "\tROW_INSTDONE[%u]: 0x%08x\n", dss,
+ snapshot->reg.instdone.row[dss]);
+
+ if (GRAPHICS_VERx100(xe) >= 1255)
+ drm_printf(p, "\tINSTDONE_GEOM_SVGUNIT[%u]: 0x%08x\n",
+ dss, snapshot->reg.instdone.geom_svg[dss]);
+ }
+}
+
/**
* xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
* @snapshot: Xe HW Engine snapshot object.
@@ -887,9 +1006,12 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
- if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
+ xe_hw_engine_snapshot_instdone_print(snapshot, p);
+
+ if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
drm_printf(p, "\tRCU_MODE: 0x%08x\n",
snapshot->reg.rcu_mode);
+ drm_puts(p, "\n");
}
/**
@@ -904,6 +1026,12 @@ void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
if (!snapshot)
return;
+ kfree(snapshot->reg.instdone.slice_common);
+ kfree(snapshot->reg.instdone.slice_common_extra);
+ kfree(snapshot->reg.instdone.slice_common_extra2);
+ kfree(snapshot->reg.instdone.sampler);
+ kfree(snapshot->reg.instdone.row);
+ kfree(snapshot->reg.instdone.geom_svg);
kfree(snapshot->name);
kfree(snapshot);
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index d7f828c76cc5..9f9755e31b9f 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -158,8 +158,8 @@ struct xe_hw_engine {
struct xe_hw_engine_snapshot {
/** @name: name of the hw engine */
char *name;
- /** @class: class of this hw engine */
- enum xe_engine_class class;
+ /** @hwe: hw engine */
+ struct xe_hw_engine *hwe;
/** @logical_instance: logical instance of this hw engine */
u16 logical_instance;
/** @forcewake: Force Wake information snapshot */
@@ -211,6 +211,22 @@ struct xe_hw_engine_snapshot {
u32 ipehr;
/** @reg.rcu_mode: RCU_MODE */
u32 rcu_mode;
+ struct {
+ /** @reg.instdone.ring: RING_INSTDONE */
+ u32 ring;
+ /** @reg.instdone.slice_common: SC_INSTDONE */
+ u32 *slice_common;
+ /** @reg.instdone.slice_common_extra: SC_INSTDONE_EXTRA */
+ u32 *slice_common_extra;
+ /** @reg.instdone.slice_common_extra2: SC_INSTDONE_EXTRA2 */
+ u32 *slice_common_extra2;
+ /** @reg.instdone.sampler: SAMPLER_INSTDONE */
+ u32 *sampler;
+ /** @reg.instdone.row: ROW_INSTDONE */
+ u32 *row;
+ /** @reg.instdone.geom_svg: INSTDONE_GEOM_SVGUNIT */
+ u32 *geom_svg;
+ } instdone;
} reg;
};
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 615bbc372ac6..2066d34ddf0b 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1382,7 +1382,7 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
if (!snapshot->lrc_snapshot)
goto put_bo;
- dma_resv_lock(bo->ttm.base.resv, NULL);
+ xe_bo_lock(bo, false);
if (!ttm_bo_vmap(&bo->ttm, &src)) {
xe_map_memcpy_from(xe_bo_device(bo),
snapshot->lrc_snapshot, &src, snapshot->lrc_offset,
@@ -1392,7 +1392,7 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
kvfree(snapshot->lrc_snapshot);
snapshot->lrc_snapshot = NULL;
}
- dma_resv_unlock(bo->ttm.base.resv);
+ xe_bo_unlock(bo);
put_bo:
xe_bo_put(bo);
}
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 334637511e75..2b18e8149ec3 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -423,41 +423,33 @@ int xe_mmio_init(struct xe_device *xe)
u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+ return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+ return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
struct xe_tile *tile = gt_to_tile(gt);
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
-
- return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+ return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
@@ -486,10 +478,9 @@ bool xe_mmio_in_range(const struct xe_gt *gt,
const struct xe_mmio_range *range,
struct xe_reg reg)
{
- if (reg.addr < gt->mmio.adj_limit)
- reg.addr += gt->mmio.adj_offset;
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
- return range && reg.addr >= range->start && reg.addr <= range->end;
+ return range && addr >= range->start && addr <= range->end;
}
/**
@@ -519,10 +510,11 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
u32 ldw, udw, oldudw, retries;
- if (reg.addr < gt->mmio.adj_limit) {
- reg.addr += gt->mmio.adj_offset;
- reg_udw.addr += gt->mmio.adj_offset;
- }
+ reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
+
+ /* we shouldn't adjust just one register address */
+ xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
oldudw = xe_mmio_read32(gt, reg_udw);
for (retries = 5; retries; --retries) {
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index a3cd7b3036c7..445ec6a0753e 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -36,4 +36,11 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic);
+static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
+{
+ if (addr < gt->mmio.adj_limit)
+ addr += gt->mmio.adj_offset;
+ return addr;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index ceb8345cbca6..3edeb30d5ccb 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -17,6 +17,7 @@ struct xe_modparam xe_modparam = {
.enable_display = true,
.guc_log_level = 5,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
+ .wedged_mode = 1,
/* the rest are 0 by default */
};
@@ -55,6 +56,10 @@ MODULE_PARM_DESC(max_vfs,
"(0 = no VFs [default]; N = allow up to N VFs)");
#endif
+module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
+MODULE_PARM_DESC(wedged_mode,
+ "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");
+
struct init_funcs {
int (*init)(void);
void (*exit)(void);
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index b369984f08ec..61a0d28a28c8 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -21,6 +21,7 @@ struct xe_modparam {
#ifdef CONFIG_PCI_IOV
unsigned int max_vfs;
#endif
+ int wedged_mode;
};
extern struct xe_modparam xe_modparam;
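As a side note on the new parameter: the three wedged_mode values form a simple policy (0 = never wedge, 1 = wedge only on critical errors, 2 = wedge on any hang). A minimal sketch of how such a policy could be evaluated, assuming a hypothetical helper should_wedge() that is not part of this patch or of the driver, only restating the 0/1/2 semantics from the parameter description above:

#include <stdbool.h>

static bool should_wedge(int wedged_mode, bool critical_error)
{
	/* Hypothetical: restates the 0/1/2 semantics of the wedged_mode modparam */
	switch (wedged_mode) {
	case 0:
		return false;		/* never declare the device wedged */
	case 2:
		return true;		/* wedge on any hang */
	case 1:
	default:
		return critical_error;	/* default: wedge only on critical errors */
	}
}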
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 37fbeda12d3b..c1831106ea4b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -69,7 +69,7 @@
*/
#ifdef CONFIG_LOCKDEP
-struct lockdep_map xe_pm_runtime_lockdep_map = {
+static struct lockdep_map xe_pm_runtime_lockdep_map = {
.name = "xe_pm_runtime_lockdep_map"
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 7d50c6e89d8e..5b243b7feb59 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -23,11 +23,19 @@ static void preempt_fence_work_func(struct work_struct *w)
q->ops->suspend_wait(q);
dma_fence_signal(&pfence->base);
- dma_fence_end_signalling(cookie);
-
+ /*
+	 * Opt for keeping everything in the fence critical section. This looks strange since we
+	 * have just signalled the fence, however the preempt fences are all signalled via a single
+	 * global ordered-wq, so anything that happens in this callback can easily block
+	 * progress on the entire wq, which in turn may prevent other published preempt fences from
+	 * ever signalling. Therefore try to keep everything here in the callback inside the fence
+	 * critical section. For example, if something below grabs a scary lock like vm->lock,
+	 * lockdep should complain since we also hold that lock whilst waiting on preempt fences to
+	 * complete.
+ */
xe_vm_queue_rebind_worker(q->vm);
-
xe_exec_queue_put(q);
+ dma_fence_end_signalling(cookie);
}
static const char *
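For context on the annotation the comment above relies on: dma_fence_begin_signalling()/dma_fence_end_signalling() mark a fence-signalling critical section so that lockdep can flag locks that are also held while waiting on fences. A minimal sketch of the general pattern, illustrative only and not part of this patch:

#include <linux/dma-fence.h>

static void signal_fence_annotated(struct dma_fence *fence)
{
	/* Everything up to dma_fence_end_signalling() is treated as signalling-critical */
	bool cookie = dma_fence_begin_signalling();

	dma_fence_signal(fence);
	/*
	 * Follow-up work done here is still inside the critical section, so
	 * lockdep complains if it takes a lock that another path holds while
	 * waiting for fences to signal.
	 */
	dma_fence_end_signalling(cookie);
}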
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 5b7930f46cf3..8d3765d3351e 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1075,10 +1075,12 @@ static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
struct xe_gt *gt;
- struct xe_vma *vma;
struct dma_fence *fence;
struct dma_fence_cb cb;
struct work_struct work;
+ u64 start;
+ u64 end;
+ u32 asid;
};
static const char *
@@ -1121,13 +1123,14 @@ static void invalidation_fence_work_func(struct work_struct *w)
container_of(w, struct invalidation_fence, work);
trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
- xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
+ xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
+ ifence->end, ifence->asid);
}
static int invalidation_fence_init(struct xe_gt *gt,
struct invalidation_fence *ifence,
struct dma_fence *fence,
- struct xe_vma *vma)
+ u64 start, u64 end, u32 asid)
{
int ret;
@@ -1144,7 +1147,9 @@ static int invalidation_fence_init(struct xe_gt *gt,
dma_fence_get(&ifence->base.base); /* Ref for caller */
ifence->fence = fence;
ifence->gt = gt;
- ifence->vma = vma;
+ ifence->start = start;
+ ifence->end = end;
+ ifence->asid = asid;
INIT_WORK(&ifence->work, invalidation_fence_work_func);
ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
@@ -1295,8 +1300,11 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
/* TLB invalidation must be done before signaling rebind */
if (ifence) {
- int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
- vma);
+ int err = invalidation_fence_init(tile->primary_gt,
+ ifence, fence,
+ xe_vma_start(vma),
+ xe_vma_end(vma),
+ xe_vma_vm(vma)->usm.asid);
if (err) {
dma_fence_put(fence);
kfree(ifence);
@@ -1641,7 +1649,10 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
dma_fence_wait(fence, false);
/* TLB invalidation must be done before signaling unbind */
- err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
+ err = invalidation_fence_init(tile->primary_gt, ifence, fence,
+ xe_vma_start(vma),
+ xe_vma_end(vma),
+ xe_vma_vm(vma)->usm.asid);
if (err) {
dma_fence_put(fence);
kfree(ifence);
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 65f1f1628235..2883d9aca404 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -339,6 +339,21 @@ err_out:
}
/**
+ * __xe_sync_ufence_get() - Get a reference to a user fence
+ * @ufence: input user fence
+ *
+ * Take an additional reference on the given user fence
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
+{
+ user_fence_get(ufence);
+
+ return ufence;
+}
+
+/**
* xe_sync_ufence_get() - Get user fence from sync
* @sync: input sync
*
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 3e03396af2c6..006dbf780793 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -37,6 +37,7 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
return !!sync->ufence;
}
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence);
struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
void xe_sync_ufence_put(struct xe_user_fence *ufence);
int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 4feb35c95a1c..0f6cfe06e635 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -215,13 +215,13 @@ void xe_uc_stop_prepare(struct xe_uc *uc)
xe_guc_stop_prepare(&uc->guc);
}
-int xe_uc_stop(struct xe_uc *uc)
+void xe_uc_stop(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */
if (!xe_device_uc_enabled(uc_to_xe(uc)))
- return 0;
+ return;
- return xe_guc_stop(&uc->guc);
+ xe_guc_stop(&uc->guc);
}
int xe_uc_start(struct xe_uc *uc)
@@ -247,17 +247,13 @@ again:
int xe_uc_suspend(struct xe_uc *uc)
{
- int ret;
-
/* GuC submission not enabled, nothing to do */
if (!xe_device_uc_enabled(uc_to_xe(uc)))
return 0;
uc_reset_wait(uc);
- ret = xe_uc_stop(uc);
- if (ret)
- return ret;
+ xe_uc_stop(uc);
return xe_guc_suspend(&uc->guc);
}
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index e4d4e3c99f0e..5dfa7725483d 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -16,7 +16,7 @@ int xe_uc_fini_hw(struct xe_uc *uc);
void xe_uc_gucrc_disable(struct xe_uc *uc);
int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
-int xe_uc_stop(struct xe_uc *uc);
+void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 85d6f359142d..dfd31b346021 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -315,19 +315,23 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
-static void xe_vm_kill(struct xe_vm *vm)
+static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;
lockdep_assert_held(&vm->lock);
- xe_vm_lock(vm, false);
+ if (unlocked)
+ xe_vm_lock(vm, false);
+
vm->flags |= XE_VM_FLAG_BANNED;
trace_xe_vm_kill(vm);
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
q->ops->kill(q);
- xe_vm_unlock(vm);
+
+ if (unlocked)
+ xe_vm_unlock(vm);
/* TODO: Inform user the VM is banned */
}
@@ -557,7 +561,7 @@ out_unlock_outer:
if (err) {
drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
- xe_vm_kill(vm);
+ xe_vm_kill(vm, true);
}
up_write(&vm->lock);
@@ -708,37 +712,116 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
-static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool first_op, bool last_op);
+static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
+ u8 tile_mask)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = tile_mask;
+ op->base.op = DRM_GPUVA_OP_MAP;
+ op->base.map.va.addr = vma->gpuva.va.addr;
+ op->base.map.va.range = vma->gpuva.va.range;
+ op->base.map.gem.obj = vma->gpuva.gem.obj;
+ op->base.map.gem.offset = vma->gpuva.gem.offset;
+ op->map.vma = vma;
+ op->map.immediate = true;
+ op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
+ op->map.is_null = xe_vma_is_null(vma);
+}
+
+static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
+ u8 tile_mask)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_rebind(op, vma, tile_mask);
+ list_add_tail(&op->link, &vops->list);
+
+ return 0;
+}
+
+static struct dma_fence *ops_execute(struct xe_vm *vm,
+ struct xe_vma_ops *vops);
+static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
+ struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs);
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{
struct dma_fence *fence;
struct xe_vma *vma, *next;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ int err;
lockdep_assert_held(&vm->lock);
- if (xe_vm_in_lr_mode(vm) && !rebind_worker)
+ if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
+ list_empty(&vm->rebind_list))
return 0;
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+
xe_vm_assert_held(vm);
- list_for_each_entry_safe(vma, next, &vm->rebind_list,
- combined_links.rebind) {
+ list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
xe_assert(vm->xe, vma->tile_present);
- list_del_init(&vma->combined_links.rebind);
if (rebind_worker)
trace_xe_vma_rebind_worker(vma);
else
trace_xe_vma_rebind_exec(vma);
- fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
+
+ err = xe_vm_ops_add_rebind(&vops, vma,
+ vma->tile_present);
+ if (err)
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ } else {
dma_fence_put(fence);
+ list_for_each_entry_safe(vma, next, &vm->rebind_list,
+ combined_links.rebind)
+ list_del_init(&vma->combined_links.rebind);
+ }
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
}
- return 0;
+ return err;
+}
+
+struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+
+ err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
+ if (err)
+ return ERR_PTR(err);
+
+ fence = ops_execute(vm, &vops);
+
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+
+ return fence;
}
static void xe_vma_free(struct xe_vma *vma)
@@ -863,11 +946,6 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
vma->ufence = NULL;
}
- if (vma->ufence) {
- xe_sync_ufence_put(vma->ufence);
- vma->ufence = NULL;
- }
-
if (xe_vma_is_userptr(vma)) {
struct xe_userptr_vma *uvma = to_userptr_vma(vma);
struct xe_userptr *userptr = &uvma->userptr;
@@ -1178,6 +1256,8 @@ static const struct xe_pt_ops xelp_pt_ops = {
.pde_encode_bo = xelp_pde_encode_bo,
};
+static void vm_destroy_work_func(struct work_struct *w);
+
/**
* xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
* given tile and vm.
@@ -1257,6 +1337,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
+ INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
+
INIT_LIST_HEAD(&vm->preempt.exec_queues);
vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
@@ -1279,7 +1361,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
drm_gem_object_put(vm_resv_obj);
- err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ err = xe_vm_lock(vm, true);
if (err)
goto err_close;
@@ -1323,7 +1405,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
}
- dma_resv_unlock(xe_vm_resv(vm));
+ xe_vm_unlock(vm);
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
@@ -1365,7 +1447,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
return vm;
err_unlock_close:
- dma_resv_unlock(xe_vm_resv(vm));
+ xe_vm_unlock(vm);
err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
@@ -1494,9 +1576,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_put(vm);
}
-static void xe_vm_free(struct drm_gpuvm *gpuvm)
+static void vm_destroy_work_func(struct work_struct *w)
{
- struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+ struct xe_vm *vm =
+ container_of(w, struct xe_vm, destroy_work);
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
@@ -1504,6 +1587,9 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
+ if (xe_vm_in_preempt_fence_mode(vm))
+ flush_work(&vm->preempt.rebind_work);
+
mutex_destroy(&vm->snap_mutex);
if (!(vm->flags & XE_VM_FLAG_MIGRATION))
@@ -1516,6 +1602,14 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
kfree(vm);
}
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
+{
+ struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+
+ /* To destroy the VM we need to be able to sleep */
+ queue_work(system_unbound_wq, &vm->destroy_work);
+}
+
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
{
struct xe_vm *vm;
@@ -1552,23 +1646,13 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
- int cur_fence = 0, i;
+ int cur_fence = 0;
int number_tiles = hweight8(vma->tile_present);
int err;
u8 id;
trace_xe_vma_unbind(vma);
- if (vma->ufence) {
- struct xe_user_fence * const f = vma->ufence;
-
- if (!xe_sync_ufence_get_status(f))
- return ERR_PTR(-EBUSY);
-
- vma->ufence = NULL;
- xe_sync_ufence_put(f);
- }
-
if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
@@ -1610,10 +1694,6 @@ next:
fence = cf ? &cf->base : !fence ?
xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], fence);
- }
return fence;
@@ -1630,15 +1710,15 @@ err_fences:
static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
- bool first_op, bool last_op)
+ u8 tile_mask, bool first_op, bool last_op)
{
struct xe_tile *tile;
struct dma_fence *fence;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct xe_vm *vm = xe_vma_vm(vma);
- int cur_fence = 0, i;
- int number_tiles = hweight8(vma->tile_mask);
+ int cur_fence = 0;
+ int number_tiles = hweight8(tile_mask);
int err;
u8 id;
@@ -1652,7 +1732,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
}
for_each_tile(tile, vm->xe, id) {
- if (!(vma->tile_mask & BIT(id)))
+ if (!(tile_mask & BIT(id)))
goto next;
fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
@@ -1684,12 +1764,6 @@ next:
}
}
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i],
- cf ? &cf->base : fence);
- }
-
return cf ? &cf->base : fence;
err_fences:
@@ -1717,87 +1791,46 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
return NULL;
}
-static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool immediate, bool first_op,
- bool last_op)
+static struct dma_fence *
+xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
+ u8 tile_mask, bool immediate, bool first_op, bool last_op)
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
- struct xe_user_fence *ufence;
xe_vm_assert_held(vm);
-
- ufence = find_ufence_get(syncs, num_syncs);
- if (vma->ufence && ufence)
- xe_sync_ufence_put(vma->ufence);
-
- vma->ufence = ufence ?: vma->ufence;
+ xe_bo_assert_held(bo);
if (immediate) {
- fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
- last_op);
+ fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
+ first_op, last_op);
if (IS_ERR(fence))
- return PTR_ERR(fence);
+ return fence;
} else {
- int i;
-
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], fence);
- }
}
- if (last_op)
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
- dma_fence_put(fence);
-
- return 0;
-}
-
-static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_bo *bo, struct xe_sync_entry *syncs,
- u32 num_syncs, bool immediate, bool first_op,
- bool last_op)
-{
- int err;
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(bo);
-
- if (bo && immediate) {
- err = xe_bo_validate(bo, vm, true);
- if (err)
- return err;
- }
-
- return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
- last_op);
+ return fence;
}
-static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
+static struct dma_fence *
+xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool first_op, bool last_op)
{
struct dma_fence *fence;
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
if (IS_ERR(fence))
- return PTR_ERR(fence);
-
- xe_vma_destroy(vma, fence);
- if (last_op)
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
- dma_fence_put(fence);
+ return fence;
- return 0;
+ return fence;
}
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
@@ -1940,40 +1973,18 @@ static const u32 region_to_mem_type[] = {
XE_PL_VRAM1,
};
-static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, u32 region,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool first_op, bool last_op)
+static struct dma_fence *
+xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool first_op, bool last_op)
{
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
- int err;
-
- xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
-
- if (!xe_vma_has_no_bo(vma)) {
- err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
- if (err)
- return err;
- }
if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
- true, first_op, last_op);
+ vma->tile_mask, true, first_op, last_op);
} else {
- int i;
-
- /* Nothing to do, signal fences now */
- if (last_op) {
- for (i = 0; i < num_syncs; i++) {
- struct dma_fence *fence =
- xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-
- xe_sync_entry_signal(&syncs[i], fence);
- dma_fence_put(fence);
- }
- }
-
- return 0;
+ return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
}
}
@@ -2100,6 +2111,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.immediate =
+ flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+ op->map.read_only =
+ flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
@@ -2265,23 +2280,28 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct drm_gpuva_ops *ops,
struct xe_sync_entry *syncs, u32 num_syncs,
- struct list_head *ops_list, bool last)
+ struct xe_vma_ops *vops, bool last)
{
struct xe_device *xe = vm->xe;
struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
int err = 0;
lockdep_assert_held_write(&vm->lock);
+ for_each_tile(tile, vm->xe, id)
+ tile_mask |= 0x1 << id;
+
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
struct xe_vma *vma;
- bool first = list_empty(ops_list);
+ bool first = list_empty(&vops->list);
unsigned int flags = 0;
INIT_LIST_HEAD(&op->link);
- list_add_tail(&op->link, ops_list);
+ list_add_tail(&op->link, &vops->list);
if (first) {
op->flags |= XE_VMA_OP_FIRST;
@@ -2290,10 +2310,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
}
op->q = q;
+ op->tile_mask = tile_mask;
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
+ flags |= op->map.read_only ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
@@ -2405,12 +2428,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
}
/* FIXME: Unhandled corner case */
- XE_WARN_ON(!last_op && last && !list_empty(ops_list));
+ XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
if (!last_op)
return 0;
- last_op->ops = ops;
if (last) {
last_op->flags |= XE_VMA_OP_LAST;
last_op->num_syncs = num_syncs;
@@ -2420,27 +2442,24 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
return 0;
}
-static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
- struct xe_vma *vma, struct xe_vma_op *op)
+static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
{
- int err;
-
- lockdep_assert_held_write(&vm->lock);
+ struct dma_fence *fence = NULL;
- err = xe_vm_lock_vma(exec, vma);
- if (err)
- return err;
+ lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
- op->syncs, op->num_syncs,
- !xe_vm_in_fault_mode(vm),
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
+ fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
+ op->syncs, op->num_syncs,
+ op->tile_mask,
+ op->map.immediate || !xe_vm_in_fault_mode(vm),
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
break;
case DRM_GPUVA_OP_REMAP:
{
@@ -2450,37 +2469,41 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
if (!op->remap.unmap_done) {
if (prev || next)
vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
- err = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST &&
- !prev && !next);
- if (err)
+ fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
+ op->num_syncs,
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST &&
+ !prev && !next);
+ if (IS_ERR(fence))
break;
op->remap.unmap_done = true;
}
if (prev) {
op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
- err = xe_vm_bind(vm, op->remap.prev, op->q,
- xe_vma_bo(op->remap.prev), op->syncs,
- op->num_syncs, true, false,
- op->flags & XE_VMA_OP_LAST && !next);
+ dma_fence_put(fence);
+ fence = xe_vm_bind(vm, op->remap.prev, op->q,
+ xe_vma_bo(op->remap.prev), op->syncs,
+ op->num_syncs,
+ op->remap.prev->tile_mask, true,
+ false,
+ op->flags & XE_VMA_OP_LAST && !next);
op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (err)
+ if (IS_ERR(fence))
break;
op->remap.prev = NULL;
}
if (next) {
op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
- err = xe_vm_bind(vm, op->remap.next, op->q,
- xe_vma_bo(op->remap.next),
- op->syncs, op->num_syncs,
- true, false,
- op->flags & XE_VMA_OP_LAST);
+ dma_fence_put(fence);
+ fence = xe_vm_bind(vm, op->remap.next, op->q,
+ xe_vma_bo(op->remap.next),
+ op->syncs, op->num_syncs,
+ op->remap.next->tile_mask, true,
+ false, op->flags & XE_VMA_OP_LAST);
op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (err)
+ if (IS_ERR(fence))
break;
op->remap.next = NULL;
}
@@ -2488,43 +2511,35 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
break;
}
case DRM_GPUVA_OP_UNMAP:
- err = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs, op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
+ fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
+ op->num_syncs, op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
break;
case DRM_GPUVA_OP_PREFETCH:
- err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
- op->syncs, op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
+ fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- if (err)
+ if (IS_ERR(fence))
trace_xe_vma_fail(vma);
- return err;
+ return fence;
}
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
+static struct dma_fence *
+__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
{
- struct drm_exec exec;
+ struct dma_fence *fence;
int err;
retry_userptr:
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
- drm_exec_until_all_locked(&exec) {
- err = op_execute(&exec, vm, vma, op);
- drm_exec_retry_on_contention(&exec);
- if (err)
- break;
- }
- drm_exec_fini(&exec);
-
- if (err == -EAGAIN) {
+ fence = op_execute(vm, vma, op);
+ if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
lockdep_assert_held_write(&vm->lock);
if (op->base.op == DRM_GPUVA_OP_REMAP) {
@@ -2541,22 +2556,24 @@ retry_userptr:
if (!err)
goto retry_userptr;
+ fence = ERR_PTR(err);
trace_xe_vma_fail(vma);
}
}
- return err;
+ return fence;
}
-static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
+static struct dma_fence *
+xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
{
- int ret = 0;
+ struct dma_fence *fence = ERR_PTR(-ENOMEM);
- lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->lock);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- ret = __xe_vma_op_execute(vm, op->map.vma, op);
+ fence = __xe_vma_op_execute(vm, op->map.vma, op);
break;
case DRM_GPUVA_OP_REMAP:
{
@@ -2569,42 +2586,23 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
else
vma = op->remap.next;
- ret = __xe_vma_op_execute(vm, vma, op);
+ fence = __xe_vma_op_execute(vm, vma, op);
break;
}
case DRM_GPUVA_OP_UNMAP:
- ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
- op);
+ fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
+ op);
break;
case DRM_GPUVA_OP_PREFETCH:
- ret = __xe_vma_op_execute(vm,
- gpuva_to_vma(op->base.prefetch.va),
- op);
+ fence = __xe_vma_op_execute(vm,
+ gpuva_to_vma(op->base.prefetch.va),
+ op);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- return ret;
-}
-
-static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
-{
- bool last = op->flags & XE_VMA_OP_LAST;
-
- if (last) {
- while (op->num_syncs--)
- xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
- kfree(op->syncs);
- if (op->q)
- xe_exec_queue_put(op->q);
- }
- if (!list_empty(&op->link))
- list_del(&op->link);
- if (op->ops)
- drm_gpuva_ops_free(&vm->gpuvm, op->ops);
- if (last)
- xe_vm_put(vm);
+ return fence;
}
static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
@@ -2683,34 +2681,223 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
op->flags & XE_VMA_OP_PREV_COMMITTED,
op->flags & XE_VMA_OP_NEXT_COMMITTED);
}
+ }
+}
- drm_gpuva_ops_free(&vm->gpuvm, __ops);
+static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
+ bool validate)
+{
+ struct xe_bo *bo = xe_vma_bo(vma);
+ int err = 0;
+
+ if (bo) {
+ if (!bo->vm)
+ err = drm_exec_prepare_obj(exec, &bo->ttm.base, 0);
+ if (!err && validate)
+ err = xe_bo_validate(bo, xe_vma_vm(vma), true);
}
+
+ return err;
}
-static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
- struct list_head *ops_list)
+static int check_ufence(struct xe_vma *vma)
+{
+ if (vma->ufence) {
+ struct xe_user_fence * const f = vma->ufence;
+
+ if (!xe_sync_ufence_get_status(f))
+ return -EBUSY;
+
+ vma->ufence = NULL;
+ xe_sync_ufence_put(f);
+ }
+
+ return 0;
+}
+
+static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
+ struct xe_vma_op *op)
+{
+ int err = 0;
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ err = vma_lock_and_validate(exec, op->map.vma,
+ !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
+ if (err)
+ break;
+
+ err = vma_lock_and_validate(exec,
+ gpuva_to_vma(op->base.remap.unmap->va),
+ false);
+ if (!err && op->remap.prev)
+ err = vma_lock_and_validate(exec, op->remap.prev, true);
+ if (!err && op->remap.next)
+ err = vma_lock_and_validate(exec, op->remap.next, true);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ err = check_ufence(gpuva_to_vma(op->base.unmap.va));
+ if (err)
+ break;
+
+ err = vma_lock_and_validate(exec,
+ gpuva_to_vma(op->base.unmap.va),
+ false);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ u32 region = op->prefetch.region;
+
+		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
+
+ err = vma_lock_and_validate(exec,
+ gpuva_to_vma(op->base.prefetch.va),
+ false);
+ if (!err && !xe_vma_has_no_bo(vma))
+ err = xe_bo_migrate(xe_vma_bo(vma),
+ region_to_mem_type[region]);
+ break;
+ }
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
+
+static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
+ struct xe_vm *vm,
+ struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+ int err;
+
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), 0);
+ if (err)
+ return err;
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_lock_and_prep(exec, vm, op);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct dma_fence *ops_execute(struct xe_vm *vm,
+ struct xe_vma_ops *vops)
{
struct xe_vma_op *op, *next;
+ struct dma_fence *fence = NULL;
+
+ list_for_each_entry_safe(op, next, &vops->list, link) {
+ dma_fence_put(fence);
+ fence = xe_vma_op_execute(vm, op);
+ if (IS_ERR(fence)) {
+ drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
+ op->base.op, PTR_ERR(fence));
+ fence = ERR_PTR(-ENOSPC);
+ break;
+ }
+ }
+
+ return fence;
+}
+
+static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
+{
+ if (vma->ufence)
+ xe_sync_ufence_put(vma->ufence);
+ vma->ufence = __xe_sync_ufence_get(ufence);
+}
+
+static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_user_fence *ufence)
+{
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ vma_add_ufence(op->map.vma, ufence);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ vma_add_ufence(op->remap.prev, ufence);
+ if (op->remap.next)
+ vma_add_ufence(op->remap.next, ufence);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+}
+
+static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
+ struct dma_fence *fence)
+{
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
+ struct xe_user_fence *ufence;
+ struct xe_vma_op *op;
+ int i;
+
+ ufence = find_ufence_get(vops->syncs, vops->num_syncs);
+ list_for_each_entry(op, &vops->list, link) {
+ if (ufence)
+ op_add_ufence(vm, op, ufence);
+
+ if (op->base.op == DRM_GPUVA_OP_UNMAP)
+ xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
+ else if (op->base.op == DRM_GPUVA_OP_REMAP)
+ xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
+ fence);
+ }
+ if (ufence)
+ xe_sync_ufence_put(ufence);
+ for (i = 0; i < vops->num_syncs; i++)
+ xe_sync_entry_signal(vops->syncs + i, fence);
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ dma_fence_put(fence);
+}
+
+static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+ struct xe_vma_ops *vops)
+{
+ struct drm_exec exec;
+ struct dma_fence *fence;
int err;
lockdep_assert_held_write(&vm->lock);
- list_for_each_entry_safe(op, next, ops_list, link) {
- err = xe_vma_op_execute(vm, op);
- if (err) {
- drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
- op->base.op, err);
- /*
- * FIXME: Killing VM rather than proper error handling
- */
- xe_vm_kill(vm);
- return -ENOSPC;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ goto unlock;
+
+ fence = ops_execute(vm, vops);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ /* FIXME: Killing VM rather than proper error handling */
+ xe_vm_kill(vm, false);
+ goto unlock;
+ } else {
+ vm_bind_ioctl_ops_fini(vm, vops, fence);
}
- xe_vma_op_cleanup(vm, op);
}
- return 0;
+unlock:
+ drm_exec_fini(&exec);
+ return err;
}
#define SUPPORTED_FLAGS \
@@ -2858,6 +3045,18 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
return err;
}
+static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
+ struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ memset(vops, 0, sizeof(*vops));
+ INIT_LIST_HEAD(&vops->list);
+ vops->vm = vm;
+ vops->q = q;
+ vops->syncs = syncs;
+ vops->num_syncs = num_syncs;
+}
+
int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -2871,7 +3070,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 num_syncs, num_ufence = 0;
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops;
- LIST_HEAD(ops_list);
+ struct xe_vma_ops vops;
int err;
int i;
@@ -3022,6 +3221,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto free_syncs;
}
+ xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
for (i = 0; i < args->num_binds; ++i) {
u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr;
@@ -3041,42 +3241,25 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
- &ops_list,
- i == args->num_binds - 1);
+ &vops, i == args->num_binds - 1);
if (err)
goto unwind_ops;
}
/* Nothing to do */
- if (list_empty(&ops_list)) {
+ if (list_empty(&vops.list)) {
err = -ENODATA;
goto unwind_ops;
}
- xe_vm_get(vm);
- if (q)
- xe_exec_queue_get(q);
-
- err = vm_bind_ioctl_ops_execute(vm, &ops_list);
-
- up_write(&vm->lock);
-
- if (q)
- xe_exec_queue_put(q);
- xe_vm_put(vm);
-
- for (i = 0; bos && i < args->num_binds; ++i)
- xe_bo_put(bos[i]);
-
- kvfree(bos);
- kvfree(ops);
- if (args->num_binds > 1)
- kvfree(bind_ops);
-
- return err;
+ err = vm_bind_ioctl_ops_execute(vm, &vops);
unwind_ops:
- vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ if (err && err != -ENODATA)
+ vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ for (i = args->num_binds - 1; i >= 0; --i)
+ if (ops[i])
+ drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
free_syncs:
if (err == -ENODATA)
err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
@@ -3335,7 +3518,7 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
}
if (bo) {
- dma_resv_lock(bo->ttm.base.resv, NULL);
+ xe_bo_lock(bo, false);
err = ttm_bo_vmap(&bo->ttm, &src);
if (!err) {
xe_map_memcpy_from(xe_bo_device(bo),
@@ -3344,7 +3527,7 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
snap->snap[i].len);
ttm_bo_vunmap(&bo->ttm, &src);
}
- dma_resv_unlock(bo->ttm.base.resv);
+ xe_bo_unlock(bo);
} else {
void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 306cd0934a19..204a4ff63f88 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -208,6 +208,8 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
+ u8 tile_mask);
int xe_vm_invalidate_vma(struct xe_vma *vma);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7570c2c6c463..ce1a63a5e3e7 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -178,6 +178,13 @@ struct xe_vm {
struct list_head rebind_list;
/**
+	 * @destroy_work: worker to destroy VM, needed because the last VM put
+	 * can happen from dma_fence signaling in an irq context while the
+	 * destroy path needs to be able to sleep.
+ */
+ struct work_struct destroy_work;
+
+ /**
* @rftree: range fence tree to track updates to page table structure.
* Used to implement conflict tracking between independent bind engines.
*/
@@ -269,6 +276,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ /** @immediate: Immediate bind */
+ bool immediate;
+ /** @read_only: Read only */
+ bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @dumpable: whether BO is dumped on GPU hang */
@@ -319,11 +330,6 @@ enum xe_vma_op_flags {
struct xe_vma_op {
/** @base: GPUVA base operation */
struct drm_gpuva_op base;
- /**
- * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
- * operations is processed
- */
- struct drm_gpuva_ops *ops;
/** @q: exec queue for this operation */
struct xe_exec_queue *q;
/**
@@ -337,6 +343,8 @@ struct xe_vma_op {
struct list_head link;
/** @flags: operation flags */
enum xe_vma_op_flags flags;
+ /** @tile_mask: Tile mask for operation */
+ u8 tile_mask;
union {
/** @map: VMA map operation specific data */
@@ -347,4 +355,19 @@ struct xe_vma_op {
struct xe_vma_op_prefetch prefetch;
};
};
+
+/** struct xe_vma_ops - VMA operations */
+struct xe_vma_ops {
+ /** @list: list of VMA operations */
+ struct list_head list;
+ /** @vm: VM */
+ struct xe_vm *vm;
+	/** @q: exec queue for these operations */
+ struct xe_exec_queue *q;
+	/** @syncs: syncs for these operations */
+ struct xe_sync_entry *syncs;
+ /** @num_syncs: number of syncs */
+ u32 num_syncs;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index dcf7ed51757c..9d9b7fa7a8f0 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -673,6 +673,11 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
},
+ { XE_RTP_NAME("14021567978"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_2, TBIMR_FAST_CLIP))
+ },
/* Xe2_HPG */
{ XE_RTP_NAME("15010599737"),