author		Xiang Liu <xiang.liu@amd.com>		2024-11-15 16:59:30 +0800
committer	Alex Deucher <alexander.deucher@amd.com>	2024-11-21 15:56:22 -0500
commit		928cd772e18ffbd7723cb2361db4a8ccf2222235 (patch)
tree		ca8c3f884c7dd4f74bb0ee1878780333d7400a37 /drivers/gpu/drm
parent		2f1b13521d2a64967530623dc0a3ecd8fd653722 (diff)
drm/amdgpu/vcn: reset fw_shared when VCPU buffers corrupted on vcn v4.0.3
The VCPU buffer is not necessarily corrupted. When there is a RAS fatal error, device memory access is blocked, so the VCPU BO cannot be saved to system memory as in a regular suspend sequence before going for reset. In other full device reset cases, it does get saved and restored during resume.

v2: Remove redundant code, as vcn_v4_0 did
v2: Refine commit message
v3: Drop the volatile
v3: Refine commit message

Signed-off-by: Xiang Liu <xiang.liu@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Stanley.Yang <Stanley.Yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
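Before the hunks, a small self-contained C model of the recovery pattern the patch implements may help: sw_init writes a marker into fw_shared, a RAS fatal error leaves the buffer unsaved (modelled here by simply clearing it), and hw_init re-runs the shared-init helper whenever that marker is missing. The toy_* names, the flag value, and the memset() are illustrative stand-ins only; the real structures and helpers are the ones shown in the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the driver structures; field names only mirror the patch. */
struct toy_fw_shared {
	uint32_t present_flag_0;
	struct {
		bool is_enabled;
	} sq;
};

struct toy_vcn_inst {
	struct toy_fw_shared fw_shared;
};

#define TOY_UNIFIED_QUEUE_FLAG 0x1u	/* placeholder for the unified-queue present flag */

/* Plays the role of vcn_v4_0_3_fw_shared_init(): (re)populate fw_shared. */
static void toy_fw_shared_init(struct toy_vcn_inst *inst)
{
	inst->fw_shared.present_flag_0 = TOY_UNIFIED_QUEUE_FLAG;
	inst->fw_shared.sq.is_enabled = true;
}

/* Plays the role of the hw_init-time check added by the patch: if the marker
 * written at sw_init time is gone, the buffer was never saved/restored, so
 * rebuild it before ring tests run. */
static void toy_hw_init(struct toy_vcn_inst *inst)
{
	if (!inst->fw_shared.sq.is_enabled)
		toy_fw_shared_init(inst);
}

int main(void)
{
	struct toy_vcn_inst inst = {0};

	toy_fw_shared_init(&inst);			/* sw_init path */
	memset(&inst.fw_shared, 0, sizeof(inst.fw_shared));	/* simulate loss after a RAS fatal error */
	toy_hw_init(&inst);				/* hw_init detects the missing marker and re-inits */

	printf("sq.is_enabled=%d present_flag_0=0x%x\n",
	       inst.fw_shared.sq.is_enabled,
	       (unsigned int)inst.fw_shared.present_flag_0);
	return 0;
}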
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c	30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 63e837f83646..3f69b9b2bcd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -123,6 +123,20 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return amdgpu_vcn_early_init(adev);
}

+static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+ return 0;
+}
+
/**
* vcn_v4_0_3_sw_init - sw init for VCN block
*
@@ -155,8 +169,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -179,12 +191,7 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v4_0_3_fw_shared_init(adev, i);
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -289,6 +296,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -312,6 +321,11 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
regVCN_RB1_DB_CTRL);
}
+ /* Re-init fw_shared when RAS fatal error occurred */
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ if (!fw_shared->sq.is_enabled)
+ vcn_v4_0_3_fw_shared_init(adev, i);
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;