Diffstat (limited to 'drivers/gpu/drm')
35 files changed, 318 insertions, 142 deletions
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index fc96fa5aab54..2cb748bbefcd 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -125,6 +125,9 @@
 #define TC358768_DSI_CONFW_MODE_CLR	(6 << 29)
 #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL	(0x3 << 24)
 
+/* TC358768_DSICMD_TX (0x0600) register */
+#define TC358768_DSI_CMDTX_DC_START	BIT(0)
+
 static const char * const tc358768_supplies[] = {
 	"vddc", "vddmipi", "vddio"
 };
@@ -229,6 +232,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
 	tc358768_write(priv, reg, tmp);
 }
 
+static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
+{
+	u32 val;
+
+	/* start transfer */
+	tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
+	if (priv->error)
+		return;
+
+	/* wait transfer completion */
+	priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
+					       (val & TC358768_DSI_CMDTX_DC_START) == 0,
+					       100, 100000);
+}
+
 static int tc358768_sw_reset(struct tc358768_priv *priv)
 {
 	/* Assert Reset */
@@ -518,8 +536,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
 		}
 	}
 
-	/* start transfer */
-	tc358768_write(priv, TC358768_DSICMD_TX, 1);
+	tc358768_dsicmd_tx(priv);
 
 	ret = tc358768_clear_error(priv);
 	if (ret)
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 3f2e2b851cbc..4a73821b81f6 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -421,7 +421,6 @@ static const struct dmi_system_id orientation_data[] = {
 	}, {	/* Lenovo Yoga Tab 3 X90F */
 		.matches = {
 		  DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
-		  DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
 		  DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
 		},
 		.driver_data = (void *)&lcd1600x2560_rightside_up,
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index bfd16054ca05..27c530218ee6 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -929,7 +929,7 @@ intel_enable_tv(struct intel_atomic_state *state,
 		const struct intel_crtc_state *pipe_config,
 		const struct drm_connector_state *conn_state)
 {
-	struct intel_display *display = to_intel_display(state);
+	struct intel_display *display = to_intel_display(encoder);
 
 	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
 	intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
@@ -943,7 +943,7 @@ intel_disable_tv(struct intel_atomic_state *state,
 		 const struct intel_crtc_state *old_crtc_state,
 		 const struct drm_connector_state *old_conn_state)
 {
-	struct intel_display *display = to_intel_display(state);
+	struct intel_display *display = to_intel_display(encoder);
 
 	intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
 }
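
The new tc358768_dsicmd_tx() above is a start-then-poll helper: kick the command, then wait for the hardware to clear the self-clearing start bit. A minimal standalone sketch of the same pattern, assuming a hypothetical regmap-backed device (the register name and bit are illustrative, not taken from the driver):

	#include <linux/bits.h>
	#include <linux/regmap.h>

	#define DEMO_CMD_REG	0x0600
	#define DEMO_CMD_START	BIT(0)

	/* Kick a command, then wait for hardware to clear the START bit. */
	static int demo_send_cmd(struct regmap *map)
	{
		u32 val;
		int ret;

		ret = regmap_write(map, DEMO_CMD_REG, DEMO_CMD_START);
		if (ret)
			return ret;

		/* poll every 100us, give up after 100ms, as the patch does */
		return regmap_read_poll_timeout(map, DEMO_CMD_REG, val,
						(val & DEMO_CMD_START) == 0,
						100, 100000);
	}
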
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 551b0d7974ff..5dc0ccd07636 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -80,6 +80,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
 	const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
 	const struct intel_gsc_cpd_entry *cpd_entry = NULL;
 	const struct intel_gsc_manifest_header *manifest;
+	struct intel_uc_fw_ver min_ver = { 0 };
 	size_t min_size = sizeof(*layout);
 	int i;
 
@@ -212,33 +213,46 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
 		}
 	}
 
-	if (IS_ARROWLAKE(gt->i915)) {
+	/*
+	 * ARL SKUs require newer firmwares, but the blob is actually common
+	 * across all MTL and ARL SKUs, so we need to do an explicit version check
+	 * here rather than using a separate table entry. If a too old version
+	 * is found, then just don't use GSC rather than aborting the driver load.
+	 * Note that the major number in the GSC FW version is used to indicate
+	 * the platform, so we expect it to always be 102 for MTL/ARL binaries.
+	 */
+	if (IS_ARROWLAKE_S(gt->i915))
+		min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
+	else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
+		min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };
+
+	if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
+		gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
+			gsc->release.major, gsc->release.minor,
+			gsc->release.patch, gsc->release.build);
+		return -EINVAL;
+	}
+
+	if (min_ver.major) {
 		bool too_old = false;
 
-		/*
-		 * ARL requires a newer firmware than MTL did (102.0.10.1878) but the
-		 * firmware is actually common. So, need to do an explicit version check
-		 * here rather than using a separate table entry. And if the older
-		 * MTL-only version is found, then just don't use GSC rather than aborting
-		 * the driver load.
-		 */
-		if (gsc->release.major < 102) {
+		if (gsc->release.minor < min_ver.minor) {
 			too_old = true;
-		} else if (gsc->release.major == 102) {
-			if (gsc->release.minor == 0) {
-				if (gsc->release.patch < 10) {
+		} else if (gsc->release.minor == min_ver.minor) {
+			if (gsc->release.patch < min_ver.patch) {
+				too_old = true;
+			} else if (gsc->release.patch == min_ver.patch) {
+				if (gsc->release.build < min_ver.build)
 					too_old = true;
-				} else if (gsc->release.patch == 10) {
-					if (gsc->release.build < 1878)
-						too_old = true;
-				}
 			}
 		}
 
 		if (too_old) {
-			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
+			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
 				gsc->release.major, gsc->release.minor,
-				gsc->release.patch, gsc->release.build);
+				gsc->release.patch, gsc->release.build,
+				min_ver.major, min_ver.minor,
+				min_ver.patch, min_ver.build);
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0f19cbd36829..7b1a061d92fb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -538,8 +538,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_BATTLEMAGE(i915)  (0 && i915)
 #define IS_PANTHERLAKE(i915) (0 && i915)
 
-#define IS_ARROWLAKE(i915) \
-	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
+#define IS_ARROWLAKE_H(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
+#define IS_ARROWLAKE_U(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
+#define IS_ARROWLAKE_S(i915) \
+	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
 #define IS_DG2_G10(i915) \
 	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
 #define IS_DG2_G11(i915) \
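
The nested minor/patch/build comparison above is an ordinary tuple-ordering check (major is already pinned to 102). A flattened equivalent, purely as an illustrative sketch (the struct mirrors intel_uc_fw_ver, the helper name is made up):

	struct fw_ver { int major, minor, patch, build; };

	/* True when a is strictly older than b; majors assumed equal. */
	static bool fw_ver_older(const struct fw_ver *a, const struct fw_ver *b)
	{
		if (a->minor != b->minor)
			return a->minor < b->minor;
		if (a->patch != b->patch)
			return a->patch < b->patch;
		return a->build < b->build;
	}
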
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index ff9500194d15..856b30fa37dc 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -200,8 +200,16 @@ static const u16 subplatform_g12_ids[] = {
 	INTEL_DG2_G12_IDS(ID),
 };
 
-static const u16 subplatform_arl_ids[] = {
-	INTEL_ARL_IDS(ID),
+static const u16 subplatform_arl_h_ids[] = {
+	INTEL_ARL_H_IDS(ID),
+};
+
+static const u16 subplatform_arl_u_ids[] = {
+	INTEL_ARL_U_IDS(ID),
+};
+
+static const u16 subplatform_arl_s_ids[] = {
+	INTEL_ARL_S_IDS(ID),
 };
 
 static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -261,9 +269,15 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
 		} else if (find_devid(devid, subplatform_g12_ids,
 				      ARRAY_SIZE(subplatform_g12_ids))) {
 			mask = BIT(INTEL_SUBPLATFORM_G12);
-		} else if (find_devid(devid, subplatform_arl_ids,
-				      ARRAY_SIZE(subplatform_arl_ids))) {
-			mask = BIT(INTEL_SUBPLATFORM_ARL);
+		} else if (find_devid(devid, subplatform_arl_h_ids,
+				      ARRAY_SIZE(subplatform_arl_h_ids))) {
+			mask = BIT(INTEL_SUBPLATFORM_ARL_H);
+		} else if (find_devid(devid, subplatform_arl_u_ids,
+				      ARRAY_SIZE(subplatform_arl_u_ids))) {
+			mask = BIT(INTEL_SUBPLATFORM_ARL_U);
+		} else if (find_devid(devid, subplatform_arl_s_ids,
+				      ARRAY_SIZE(subplatform_arl_s_ids))) {
+			mask = BIT(INTEL_SUBPLATFORM_ARL_S);
 		}
 
 		GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 4f4aa4ff9963..ef84eea9ba0b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -128,7 +128,9 @@ enum intel_platform {
 #define INTEL_SUBPLATFORM_RPLU	2
 
 /* MTL */
-#define INTEL_SUBPLATFORM_ARL	0
+#define INTEL_SUBPLATFORM_ARL_H	0
+#define INTEL_SUBPLATFORM_ARL_U	1
+#define INTEL_SUBPLATFORM_ARL_S	2
 
 enum intel_ppgtt_type {
 	INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
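
All three new tables feed the same classify-by-device-ID pattern: linear scan of a small static ID list, yielding a one-hot subplatform mask. A self-contained sketch of the idea; the device IDs here are illustrative placeholders, not taken from the headers:

	#include <stdbool.h>
	#include <stdint.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static bool find_devid(uint16_t id, const uint16_t *p, unsigned int num)
	{
		while (num--)
			if (*p++ == id)
				return true;
		return false;
	}

	/* Classify a PCI device ID into a one-hot subplatform mask. */
	static unsigned int classify_subplatform(uint16_t devid)
	{
		static const uint16_t arl_h_ids[] = { 0x1111, 0x2222 }; /* placeholders */
		static const uint16_t arl_u_ids[] = { 0x3333 };

		if (find_devid(devid, arl_h_ids, ARRAY_SIZE(arl_h_ids)))
			return 1u << 0;	/* e.g. ..._ARL_H */
		if (find_devid(devid, arl_u_ids, ARRAY_SIZE(arl_u_ids)))
			return 1u << 1;	/* e.g. ..._ARL_U */
		return 0;
	}
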
diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c
index 98327f9bbd9c..5edc3c01af72 100644
--- a/drivers/gpu/drm/imagination/pvr_context.c
+++ b/drivers/gpu/drm/imagination/pvr_context.c
@@ -17,10 +17,14 @@
 #include <drm/drm_auth.h>
 #include <drm/drm_managed.h>
+
+#include <linux/bug.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/xarray.h>
@@ -342,6 +346,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co
 		return err;
 	}
 
+	spin_lock(&pvr_dev->ctx_list_lock);
+	list_add_tail(&ctx->file_link, &pvr_file->contexts);
+	spin_unlock(&pvr_dev->ctx_list_lock);
+
 	return 0;
 
 err_destroy_fw_obj:
@@ -368,6 +376,11 @@ pvr_context_release(struct kref *ref_count)
 		container_of(ref_count, struct pvr_context, ref_count);
 	struct pvr_device *pvr_dev = ctx->pvr_dev;
 
+	WARN_ON(in_interrupt());
+	spin_lock(&pvr_dev->ctx_list_lock);
+	list_del(&ctx->file_link);
+	spin_unlock(&pvr_dev->ctx_list_lock);
+
 	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
 	pvr_context_destroy_queues(ctx);
 	pvr_fw_object_destroy(ctx->fw_obj);
@@ -425,11 +438,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
  */
 void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
 {
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
 	struct pvr_context *ctx;
 	unsigned long handle;
 
 	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
 		pvr_context_destroy(pvr_file, handle);
+
+	spin_lock(&pvr_dev->ctx_list_lock);
+	ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+
+	while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
+		list_del_init(&ctx->file_link);
+
+		if (pvr_context_get_if_referenced(ctx)) {
+			spin_unlock(&pvr_dev->ctx_list_lock);
+
+			pvr_vm_unmap_all(ctx->vm_ctx);
+
+			pvr_context_put(ctx);
+			spin_lock(&pvr_dev->ctx_list_lock);
+		}
+		ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+	}
+	spin_unlock(&pvr_dev->ctx_list_lock);
 }
 
 /**
@@ -439,6 +471,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
 void pvr_context_device_init(struct pvr_device *pvr_dev)
 {
 	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+	spin_lock_init(&pvr_dev->ctx_list_lock);
 }
 
 /**
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
index 0c7b97dfa6ba..07afa179cdf4 100644
--- a/drivers/gpu/drm/imagination/pvr_context.h
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -85,6 +85,9 @@ struct pvr_context {
 		/** @compute: Transfer queue. */
 		struct pvr_queue *transfer;
 	} queues;
+
+	/** @file_link: pvr_file PVR context list link. */
+	struct list_head file_link;
 };
 
 static __always_inline struct pvr_queue *
@@ -124,6 +127,24 @@ pvr_context_get(struct pvr_context *ctx)
 }
 
 /**
+ * pvr_context_get_if_referenced() - Take an additional reference on a still
+ * referenced context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ *  * True on success, or
+ *  * false if no context pointer passed, or the context wasn't still
+ *  * referenced.
+ */
+static __always_inline bool
+pvr_context_get_if_referenced(struct pvr_context *ctx)
+{
+	return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0;
+}
+
+/**
 * pvr_context_lookup() - Lookup context pointer from handle and file.
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Context handle.
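
pvr_context_get_if_referenced() is built on kref_get_unless_zero(), which only succeeds while the refcount is still non-zero, so a dying object can never be resurrected from 0 back to 1. The same opportunistic-get idiom in isolation, with a hypothetical object type:

	#include <linux/kref.h>

	struct demo_obj {
		struct kref ref;
	};

	/* Succeeds only while someone else still holds a reference. */
	static bool demo_obj_get_if_live(struct demo_obj *obj)
	{
		return obj && kref_get_unless_zero(&obj->ref);
	}
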
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index b574e23d484b..6d0dfacb677b 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/math.h>
 #include <linux/mutex.h>
+#include <linux/spinlock_types.h>
 #include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/wait.h>
@@ -293,6 +294,12 @@ struct pvr_device {
 
 	/** @sched_wq: Workqueue for schedulers. */
 	struct workqueue_struct *sched_wq;
+
+	/**
+	 * @ctx_list_lock: Lock to be held when accessing the context list in
+	 * struct pvr_file.
+	 */
+	spinlock_t ctx_list_lock;
 };
 
 /**
@@ -344,6 +351,9 @@ struct pvr_file {
 	 * This array is used to allocate handles returned to userspace.
 	 */
 	struct xarray vm_ctx_handles;
+
+	/** @contexts: PVR context list. */
+	struct list_head contexts;
 };
 
 /**
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 684a9b9a2247..36c0e768698e 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -28,6 +28,7 @@
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
 	 */
 	pvr_file->pvr_dev = pvr_dev;
 
+	INIT_LIST_HEAD(&pvr_file->contexts);
+
 	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
 	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
 	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
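
pvr_destroy_contexts_for_file() above drains the new per-file list while repeatedly dropping the spinlock, because the per-context cleanup may sleep. The shape of that loop as a sketch, reusing the demo_obj type from the earlier example (demo_obj_cleanup() and demo_obj_put() are assumed helpers):

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_node {
		struct kref ref;
		struct list_head link;
	};

	void demo_obj_cleanup(struct demo_node *obj);	/* assumed; may sleep */
	void demo_obj_put(struct demo_node *obj);	/* assumed kref put */

	static void demo_drain(spinlock_t *lock, struct list_head *head)
	{
		spin_lock(lock);
		while (!list_empty(head)) {
			struct demo_node *obj =
				list_first_entry(head, struct demo_node, link);

			/* Detach first so a concurrent release can't unlink twice. */
			list_del_init(&obj->link);

			/* Only touch objects that are still alive. */
			if (kref_get_unless_zero(&obj->ref)) {
				spin_unlock(lock);
				demo_obj_cleanup(obj);	/* lock dropped: may sleep */
				demo_obj_put(obj);
				spin_lock(lock);
			}
		}
		spin_unlock(lock);
	}
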
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 39773991710a..363f885a7098 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -14,6 +14,7 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_gpuvm.h>
 
+#include <linux/bug.h>
 #include <linux/container_of.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -597,12 +598,26 @@ err_free:
 }
 
 /**
- * pvr_vm_context_release() - Teardown a VM context.
- * @ref_count: Pointer to reference counter of the VM context.
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
 *
 * This function ensures that no mappings are left dangling by unmapping them
 * all in order of ascending device-virtual address.
 */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+			     vm_ctx->gpuvm_mgr.mm_range));
+}
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function also ensures that no mappings are left dangling by calling
+ * pvr_vm_unmap_all.
+ */
 static void
 pvr_vm_context_release(struct kref *ref_count)
 {
@@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count)
 	if (vm_ctx->fw_mem_ctx_obj)
 		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
 
-	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
-			     vm_ctx->gpuvm_mgr.mm_range));
+	pvr_vm_unmap_all(vm_ctx);
 
 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index f2a6463f2b05..79406243617c 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx,
 	       struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
 	       u64 device_addr, u64 size);
 int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
 
 dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
 struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 027867c2a8c5..99110ab2f44d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -992,7 +992,7 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
 		ctrl->data = data;
 
 		ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-		if (ret == -EAGAIN && ctrl->retryTimeMs) {
+		if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
 			/*
 			 * Device (likely an eDP panel) isn't ready yet, wait for the time specified
 			 * by GSP before retrying again
@@ -1060,33 +1060,44 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
 	NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
 	u8 size = *psize;
 	int ret;
+	int retries;
 
-	ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
-	if (IS_ERR(ctrl))
-		return PTR_ERR(ctrl);
+	for (retries = 0; retries < 3; ++retries) {
+		ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+		if (IS_ERR(ctrl))
+			return PTR_ERR(ctrl);
 
-	ctrl->subDeviceInstance = 0;
-	ctrl->displayId = BIT(outp->index);
-	ctrl->bAddrOnly = !size;
-	ctrl->cmd = type;
-	if (ctrl->bAddrOnly) {
-		ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
-		ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
-	}
-	ctrl->addr = addr;
-	ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
-	memcpy(ctrl->data, data, size);
+		ctrl->subDeviceInstance = 0;
+		ctrl->displayId = BIT(outp->index);
+		ctrl->bAddrOnly = !size;
+		ctrl->cmd = type;
+		if (ctrl->bAddrOnly) {
+			ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+			ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+		}
+		ctrl->addr = addr;
+		ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+		memcpy(ctrl->data, data, size);
 
-	ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-	if (ret) {
-		nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-		return ret;
+		ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+		if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+			/*
+			 * Device (likely an eDP panel) isn't ready yet, wait for the time specified
+			 * by GSP before retrying again
+			 */
+			nvkm_debug(&disp->engine.subdev,
+				   "Waiting %dms for GSP LT panel delay before retrying in AUX\n",
+				   ctrl->retryTimeMs);
+			msleep(ctrl->retryTimeMs);
+			nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+		} else {
+			memcpy(data, ctrl->data, size);
+			*psize = ctrl->size;
+			ret = ctrl->replyType;
+			nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+			break;
+		}
 	}
 
-	memcpy(data, ctrl->data, size);
-	*psize = ctrl->size;
-	ret = ctrl->replyType;
-	nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
index a1c8545f1249..cac6d64ab67d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
@@ -89,11 +89,6 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
 			nvkm_falcon_fw_dtor_sigs(fw);
 	}
 
-	/* after last write to the img, sync dma mappings */
-	dma_sync_single_for_device(fw->fw.device->dev,
-				   fw->fw.phys,
-				   sg_dma_len(&fw->fw.mem.sgl),
-				   DMA_TO_DEVICE);
 	FLCNFW_DBG(fw, "resetting");
 	fw->func->reset(fw);
 
@@ -105,6 +100,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
 		goto done;
 	}
 
+	/* after last write to the img, sync dma mappings */
+	dma_sync_single_for_device(fw->fw.device->dev,
+				   fw->fw.phys,
+				   sg_dma_len(&fw->fw.mem.sgl),
+				   DMA_TO_DEVICE);
+
 	ret = fw->func->load(fw);
 	if (ret)
 		goto done;
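
The AUX change above and the RPC status mapping below implement one idea: when GSP reports "not ready" (surfaced as -EAGAIN/-EBUSY), honor the firmware-provided delay and retry a bounded number of times. An illustrative sketch of the pattern, with demo_ctrl_push() standing in for the real RPC call:

	#include <linux/delay.h>
	#include <linux/errno.h>

	struct demo_dev;
	int demo_ctrl_push(struct demo_dev *dev, unsigned int *retry_ms); /* assumed */

	static int demo_ctrl_call_retried(struct demo_dev *dev)
	{
		unsigned int retry_ms = 0;
		int tries, ret = -EBUSY;

		for (tries = 0; tries < 3; ++tries) {
			ret = demo_ctrl_push(dev, &retry_ms);
			if ((ret != -EAGAIN && ret != -EBUSY) || !retry_ms)
				break;		/* done, hard failure, or no retry hint */
			msleep(retry_ms);	/* firmware-suggested backoff */
		}
		return ret;
	}
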
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index cf58f9da9139..d586aea30898 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -78,7 +78,7 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
 	switch (rpc_status) {
 	case 0x55: /* NV_ERR_NOT_READY */
 	case 0x66: /* NV_ERR_TIMEOUT_RETRY */
-		return -EAGAIN;
+		return -EBUSY;
 	case 0x51: /* NV_ERR_NO_MEMORY */
 		return -ENOMEM;
 	default:
@@ -601,7 +601,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
 
 	if (rpc->status) {
 		ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
-		if (PTR_ERR(ret) != -EAGAIN)
+		if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
 			nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
 	} else {
 		ret = repc ? rpc->params : NULL;
@@ -660,7 +660,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
 
 	if (rpc->status) {
 		ret = r535_rpc_status_to_errno(rpc->status);
-		if (ret != -EAGAIN)
+		if (ret != -EAGAIN && ret != -EBUSY)
 			nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
 				   object->client->object.handle, object->handle, rpc->cmd, rpc->status);
 	}
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 4082c8f2951d..6fbff516c1c1 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -390,11 +390,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
 {
 	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
 
+	if ((vma->vm_flags & VM_SHARED) == 0)
+		return -EINVAL;
+
 	switch (offset) {
 	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
 		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
 		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
 			return -EINVAL;
+		vm_flags_clear(vma, VM_MAYWRITE);
 
 		break;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 8ca85526491e..a49132f3778b 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -990,6 +990,8 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
 
 			if (!size)
 				break;
+
+			offset = 0;
 		}
 
 		return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
@@ -1580,7 +1582,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
 {
 	struct panthor_vm *vm;
 
+	xa_lock(&pool->xa);
 	vm = panthor_vm_get(xa_load(&pool->xa, handle));
+	xa_unlock(&pool->xa);
 
 	return vm;
 }
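
panthor_vm_pool_get_vm() above now holds the XArray lock across the lookup-plus-get, closing a window where the entry could be erased and freed between xa_load() and the refcount bump. The general idiom, sketched with a hypothetical kref-backed object (demo_obj_get() is an assumed NULL-safe getter):

	#include <linux/xarray.h>

	struct demo_obj;
	struct demo_obj *demo_obj_get(struct demo_obj *obj); /* assumed, NULL-safe */

	static struct demo_obj *demo_pool_get(struct xarray *xa, unsigned long handle)
	{
		struct demo_obj *obj;

		/* Hold the lock across load + get, or the entry may be freed between them. */
		xa_lock(xa);
		obj = demo_obj_get(xa_load(xa, handle));
		xa_unlock(xa);

		return obj;
	}
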
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index f161f40d8ce4..69900138295b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1093,10 +1093,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
 	if (!plane->state->fb)
 		return -EINVAL;
 
-	if (state)
-		crtc_state = drm_atomic_get_existing_crtc_state(state,
-								new_plane_state->crtc);
-	else /* Special case for asynchronous cursor updates. */
+	crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+
+	/* Special case for asynchronous cursor updates. */
+	if (!crtc_state)
 		crtc_state = plane->crtc->state;
 
 	return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f39bf992364d..8db38927729b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1265,6 +1265,8 @@ static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
 	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
 	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
 
+	if (WARN_ON(!bo))
+		return -EINVAL;
 	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
 }
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 42dc55cb23f4..0c9e4b2fafab 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -528,7 +528,7 @@
 *   [4-6]	RSVD
 *   [7]	Disabled
 */
-#define CCS_MODE				XE_REG(0x14804)
+#define CCS_MODE				XE_REG(0x14804, XE_REG_OPTION_MASKED)
 #define   CCS_MODE_CSLICE_0_3_MASK		REG_GENMASK(11, 0) /* 3 bits per cslice */
 #define   CCS_MODE_CSLICE_MASK			0x7 /* CCS0-3 + rsvd */
 #define   CCS_MODE_CSLICE_WIDTH			ilog2(CCS_MODE_CSLICE_MASK + 1)
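
CCS_MODE is now declared as a "masked" register: the top 16 bits act as per-bit write enables for the low 16 payload bits, so hardware leaves unselected bits untouched. A sketch of composing such a write (illustrative helper, not the driver's API):

	#include <linux/types.h>

	/* Bit (n + 16) enables the write of payload bit n. */
	static u32 masked_field(u16 mask, u16 value)
	{
		return ((u32)mask << 16) | (value & mask);
	}
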
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 5b232f2951b1..ae6b337cdc54 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -888,8 +888,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 	if (WARN_ON(!xe_bo_is_pinned(bo)))
 		return -EINVAL;
 
-	if (WARN_ON(!xe_bo_is_vram(bo)))
-		return -EINVAL;
+	if (!xe_bo_is_vram(bo))
+		return 0;
 
 	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
 	if (ret)
@@ -939,6 +939,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 		.interruptible = false,
 	};
 	struct ttm_resource *new_mem;
+	struct ttm_place *place = &bo->placements[0];
 	int ret;
 
 	xe_bo_assert_held(bo);
@@ -949,9 +950,15 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 	if (WARN_ON(!xe_bo_is_pinned(bo)))
 		return -EINVAL;
 
-	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+	if (WARN_ON(xe_bo_is_vram(bo)))
+		return -EINVAL;
+
+	if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
 		return -EINVAL;
 
+	if (!mem_type_is_vram(place->mem_type))
+		return 0;
+
 	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
 	if (ret)
 		return ret;
@@ -1766,6 +1773,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
 
 int xe_bo_pin(struct xe_bo *bo)
 {
+	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
 	int err;
 
@@ -1796,21 +1804,21 @@ int xe_bo_pin(struct xe_bo *bo)
 	 */
 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
 	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
-		struct ttm_place *place = &(bo->placements[0]);
-
 		if (mem_type_is_vram(place->mem_type)) {
 			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
 
 			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
 				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
 			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
-			spin_lock(&xe->pinned.lock);
-			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
-			spin_unlock(&xe->pinned.lock);
 		}
 	}
 
+	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+		spin_lock(&xe->pinned.lock);
+		list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+		spin_unlock(&xe->pinned.lock);
+	}
+
 	ttm_bo_pin(&bo->ttm);
 
 	/*
@@ -1856,23 +1864,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
 
 void xe_bo_unpin(struct xe_bo *bo)
 {
+	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
 
 	xe_assert(xe, !bo->ttm.base.import_attach);
 	xe_assert(xe, xe_bo_is_pinned(bo));
 
-	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
-		struct ttm_place *place = &(bo->placements[0]);
-
-		if (mem_type_is_vram(place->mem_type)) {
-			spin_lock(&xe->pinned.lock);
-			xe_assert(xe, !list_empty(&bo->pinned_link));
-			list_del_init(&bo->pinned_link);
-			spin_unlock(&xe->pinned.lock);
-		}
+	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+		spin_lock(&xe->pinned.lock);
+		xe_assert(xe, !list_empty(&bo->pinned_link));
+		list_del_init(&bo->pinned_link);
+		spin_unlock(&xe->pinned.lock);
 	}
-
 	ttm_bo_unpin(&bo->ttm);
 }
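
Note how the pin and unpin paths above key both the list insertion and the removal on the same predicate, so the bookkeeping stays balanced. That pairing in schematic form, with hypothetical types:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	#define DEMO_BO_FLAG_GGTT	(1u << 0)

	struct demo_bo {
		bool in_vram;
		unsigned int flags;
		struct list_head pinned_link;
	};

	struct demo_gpu {
		spinlock_t pinned_lock;
		struct list_head pinned_list;
	};

	/* One predicate shared by pin and unpin keeps add/remove symmetric. */
	static bool demo_bo_needs_tracking(const struct demo_bo *bo)
	{
		return bo->in_vram || (bo->flags & DEMO_BO_FLAG_GGTT);
	}

	static void demo_bo_pin(struct demo_gpu *gpu, struct demo_bo *bo)
	{
		if (demo_bo_needs_tracking(bo)) {
			spin_lock(&gpu->pinned_lock);
			list_add_tail(&bo->pinned_link, &gpu->pinned_list);
			spin_unlock(&gpu->pinned_lock);
		}
	}

	static void demo_bo_unpin(struct demo_gpu *gpu, struct demo_bo *bo)
	{
		if (demo_bo_needs_tracking(bo)) {
			spin_lock(&gpu->pinned_lock);
			list_del_init(&bo->pinned_link);
			spin_unlock(&gpu->pinned_lock);
		}
	}
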
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 541b49007d73..8fb2be061003 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe)
 	u8 id;
 	int ret;
 
-	if (!IS_DGFX(xe))
-		return 0;
-
 	/* User memory */
-	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+	for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
 		struct ttm_resource_manager *man =
 			ttm_manager_type(bdev, mem_type);
 
+		/*
+		 * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
+		 * state since this state lives inside graphics stolen memory which doesn't survive
+		 * hibernation.
+		 *
+		 * This can be further improved by only evicting objects that we know have actually
+		 * used a compression enabled PAT index.
+		 */
+		if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
+			continue;
+
 		if (man) {
 			ret = ttm_resource_manager_evict_all(bdev, man);
 			if (ret)
@@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 	struct xe_bo *bo;
 	int ret;
 
-	if (!IS_DGFX(xe))
-		return 0;
-
 	spin_lock(&xe->pinned.lock);
 	for (;;) {
 		bo = list_first_entry_or_null(&xe->pinned.evicted,
@@ -159,7 +164,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 		 * should setup the iosys map.
 		 */
 		xe_assert(xe, !iosys_map_is_null(&bo->vmap));
-		xe_assert(xe, xe_bo_is_vram(bo));
 
 		xe_bo_put(bo);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 51bb9d875268..0e2dd691bdae 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -88,10 +88,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 	mutex_init(&xef->exec_queue.lock);
 	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
 
-	spin_lock(&xe->clients.lock);
-	xe->clients.count++;
-	spin_unlock(&xe->clients.lock);
-
 	file->driver_priv = xef;
 	kref_init(&xef->refcount);
 
@@ -108,17 +104,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 static void xe_file_destroy(struct kref *ref)
 {
 	struct xe_file *xef = container_of(ref, struct xe_file, refcount);
-	struct xe_device *xe = xef->xe;
 
 	xa_destroy(&xef->exec_queue.xa);
 	mutex_destroy(&xef->exec_queue.lock);
 	xa_destroy(&xef->vm.xa);
 	mutex_destroy(&xef->vm.lock);
 
-	spin_lock(&xe->clients.lock);
-	xe->clients.count--;
-	spin_unlock(&xe->clients.lock);
-
 	xe_drm_client_put(xef->client);
 	kfree(xef->process_name);
 	kfree(xef);
@@ -334,7 +325,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	xe->info.force_execlist = xe_modparam.force_execlist;
 
 	spin_lock_init(&xe->irq.lock);
-	spin_lock_init(&xe->clients.lock);
 
 	init_waitqueue_head(&xe->ufence_wq);
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 4c3f0ebe78a9..f1fbfe916867 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -191,4 +191,18 @@ void xe_device_declare_wedged(struct xe_device *xe);
 struct xe_file *xe_file_get(struct xe_file *xef);
 void xe_file_put(struct xe_file *xef);
 
+/*
+ * Occasionally it is seen that the G2H worker starts running after a delay of more than
+ * a second even after being queued and activated by the Linux workqueue subsystem. This
+ * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
+ * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS
+ * and this is beyond xe kmd.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+#define LNL_FLUSH_WORKQUEUE(wq__) \
+	flush_workqueue(wq__)
+#define LNL_FLUSH_WORK(wrk__) \
+	flush_work(wrk__)
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 85bede4dd646..b9ea455d6f59 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -379,15 +379,6 @@ struct xe_device {
 		struct workqueue_struct *wq;
 	} sriov;
 
-	/** @clients: drm clients info */
-	struct {
-		/** @clients.lock: Protects drm clients info */
-		spinlock_t lock;
-
-		/** @clients.count: number of drm clients */
-		u64 count;
-	} clients;
-
 	/** @usm: unified memory state */
 	struct {
 		/** @usm.asid: convert a ASID to VM */
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index f23ac1e2ed88..31cca938956f 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -132,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (XE_IOCTL_DBG(xe, !q))
 		return -ENOENT;
 
-	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
-		return -EINVAL;
+	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
+		err = -EINVAL;
+		goto err_exec_queue;
+	}
 
 	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
-			 q->width != args->num_batch_buffer))
-		return -EINVAL;
+			 q->width != args->num_batch_buffer)) {
+		err = -EINVAL;
+		goto err_exec_queue;
+	}
 
 	if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
 		err = -ECANCELED;
@@ -199,14 +203,14 @@ retry:
 				write_locked = false;
 	}
 	if (err)
-		goto err_syncs;
+		goto err_hw_exec_mode;
 
 	if (write_locked) {
 		err = xe_vm_userptr_pin(vm);
 		downgrade_write(&vm->lock);
 		write_locked = false;
 		if (err)
-			goto err_hw_exec_mode;
+			goto err_unlock_list;
 	}
 
 	if (!args->num_batch_buffer) {
@@ -220,6 +224,7 @@ retry:
 	fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
 	if (IS_ERR(fence)) {
 		err = PTR_ERR(fence);
+		xe_vm_unlock(vm);
 		goto err_unlock_list;
 	}
 	for (i = 0; i < num_syncs; i++)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index d098d2dd1b2d..fd0f3b3c9101 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -260,8 +260,14 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
 {
 	int i;
 
+	/*
+	 * Before releasing our ref to lrc and xef, accumulate our run ticks
+	 */
+	xe_exec_queue_update_run_ticks(q);
+
 	for (i = 0; i < q->width; ++i)
 		xe_lrc_put(q->lrc[i]);
+
 	__xe_exec_queue_free(q);
}
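
The LNL_FLUSH_WORK()/LNL_FLUSH_WORKQUEUE() macros defined above encode a wait-then-flush-then-recheck workaround: on timeout, flush the work in case the workqueue simply has not scheduled it yet, and only then declare failure. Roughly, assuming a hypothetical fence struct with a done flag and a work item:

	#include <linux/wait.h>
	#include <linux/workqueue.h>

	struct demo_fence {
		bool done;
		struct work_struct work;
	};

	static bool demo_wait_done(struct demo_fence *f, wait_queue_head_t *wq)
	{
		if (wait_event_timeout(*wq, f->done, HZ))
			return true;

		/* Timed out: the worker may never have run; flush and recheck. */
		flush_work(&f->work);	/* what LNL_FLUSH_WORK() expands to */
		return f->done;
	}
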
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index 9360ac4de489..b6adfb9f2030 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -68,6 +68,12 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
 		}
 	}
 
+	/*
+	 * Mask bits need to be set for the register. Though only Xe2+
+	 * platforms require setting of mask bits, it won't harm for older
+	 * platforms as these bits are unused there.
+	 */
+	mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
 	xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
 
 	xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
@@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
 	}
 
 	/* CCS mode can only be updated when there are no drm clients */
-	spin_lock(&xe->clients.lock);
-	if (xe->clients.count) {
-		spin_unlock(&xe->clients.lock);
+	mutex_lock(&xe->drm.filelist_mutex);
+	if (!list_empty(&xe->drm.filelist)) {
+		mutex_unlock(&xe->drm.filelist_mutex);
+		xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
 		return -EBUSY;
 	}
 
@@ -146,7 +153,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
 		xe_gt_reset_async(gt);
 	}
 
-	spin_unlock(&xe->clients.lock);
+	mutex_unlock(&xe->drm.filelist_mutex);
 
 	return count;
 }
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 062a0c2fd2cd..192643d63d22 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -395,6 +395,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
 		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
 		 */
 		xe_ggtt_node_remove(node, false);
+	} else {
+		xe_ggtt_node_fini(node);
 	}
 }
 
@@ -450,7 +452,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
 	config->ggtt_region = node;
 	return 0;
 err:
-	xe_ggtt_node_fini(node);
+	pf_release_ggtt(tile, node);
 	return err;
 }
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 773de1f08db9..3cb228c773cd 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -72,6 +72,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
 	struct xe_device *xe = gt_to_xe(gt);
 	struct xe_gt_tlb_invalidation_fence *fence, *next;
 
+	LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
+
 	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
 	list_for_each_entry_safe(fence, next,
 				 &gt->tlb_invalidation.pending_fences, link) {
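
ccs_mode_store() above now gates reconfiguration on the DRM core's own open-file list instead of a private client counter. The idiom in isolation; filelist and filelist_mutex are real struct drm_device fields, the function itself is illustrative:

	#include <drm/drm_device.h>
	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	static int demo_reconfigure(struct drm_device *drm)
	{
		int ret = 0;

		mutex_lock(&drm->filelist_mutex);
		if (!list_empty(&drm->filelist))
			ret = -EBUSY;	/* open clients: refuse to reconfigure */
		/* else: reconfigure here; the mutex blocks concurrent opens */
		mutex_unlock(&drm->filelist_mutex);

		return ret;
	}
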
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 4870af1c5a90..8aeb1789805c 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1018,17 +1018,8 @@ retry_same_fence:
 
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 
-	/*
-	 * Occasionally it is seen that the G2H worker starts running after a delay of more than
-	 * a second even after being queued and activated by the Linux workqueue subsystem. This
-	 * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
-	 * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
-	 * and this is beyond xe kmd.
-	 *
-	 * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
-	 */
 	if (!ret) {
-		flush_work(&ct->g2h_worker);
+		LNL_FLUSH_WORK(&ct->g2h_worker);
 		if (g2h_fence.done) {
 			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
 				   g2h_fence.seqno, action[0]);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 7afcc243037c..9a8564ea06b5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -747,8 +747,6 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
 {
 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
 
-	xe_exec_queue_update_run_ticks(job->q);
-
 	trace_xe_sched_job_free(job);
 	xe_sched_job_put(job);
 }
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index fd2ffe8df156..8dd55798ab31 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1614,9 +1614,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
 	struct xe_oa_stream *stream = file->private_data;
 	struct xe_gt *gt = stream->gt;
 
+	xe_pm_runtime_get(gt_to_xe(gt));
 	mutex_lock(&gt->oa.gt_lock);
 	xe_oa_destroy_locked(stream);
 	mutex_unlock(&gt->oa.gt_lock);
+	xe_pm_runtime_put(gt_to_xe(gt));
 
 	/* Release the reference the OA stream kept on the driver */
 	drm_dev_put(&gt_to_xe(gt)->drm);
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index f5deb81eba01..5b4264ea38bd 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
 		}
 
 		if (!timeout) {
+			LNL_FLUSH_WORKQUEUE(xe->ordered_wq);
+			err = do_compare(addr, args->value, args->mask,
+					 args->op);
+			if (err <= 0) {
+				drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n");
+				break;
+			}
 			err = -ETIME;
 			break;
 		}
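
The xe_oa_release() fix above brackets hardware teardown with a runtime-PM reference so the device is guaranteed awake for the register writes. The same bracket in generic form, using a plain struct device; the destroy helper is hypothetical:

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	void demo_destroy_hw_state(struct device *dev);	/* assumed: touches MMIO */

	static void demo_release_hw(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* ensure the device is powered */
		demo_destroy_hw_state(dev);
		pm_runtime_put(dev);
	}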