Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 145
1 file changed, 145 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 07bc0f504713..e1288901beb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -820,3 +820,148 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
}
}
}
+
+static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+ u32 acc_flags, u32 hwip,
+ bool write, u32 *rlcg_flag)
+{
+ bool ret = false;
+
+ switch (hwip) {
+ case GC_HWIP:
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ *rlcg_flag =
+ write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
+ ret = true;
+ /* only in newer versions are AMDGPU_REGS_NO_KIQ and
+ * AMDGPU_REGS_RLC enabled simultaneously */
+ } else if ((acc_flags & AMDGPU_REGS_RLC) &&
+ !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
+ *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
+ ret = true;
+ }
+ break;
+ case MMHUB_HWIP:
+ if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+ (acc_flags & AMDGPU_REGS_RLC) && write) {
+ *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
+ ret = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
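
For reviewers, the flag selection above reduces to a small decision table: indirect GC access always goes through rlcg (read or write), the GC legacy path qualifies when AMDGPU_REGS_RLC is set and AMDGPU_REGS_NO_KIQ is clear, and MMHUB only qualifies for indirect writes. The standalone sketch below restates the GC_HWIP branch; the constant values are placeholders for illustration only (the real AMDGPU_REGS_* and AMDGPU_RLCG_* definitions live in the amdgpu headers and may differ):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values, for illustration only. */
#define AMDGPU_REGS_NO_KIQ          (1u << 1)
#define AMDGPU_REGS_RLC             (1u << 2)
#define AMDGPU_RLCG_GC_WRITE        (0x1u << 28)
#define AMDGPU_RLCG_GC_READ         (0x2u << 28)
#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8u << 28)

/* Mirror of the GC_HWIP branch in the patch: indirect access always
 * qualifies; the legacy path needs AMDGPU_REGS_RLC set and
 * AMDGPU_REGS_NO_KIQ clear. */
static bool gc_rlcg_flag(bool indirect, uint32_t acc_flags, bool write,
                         uint32_t *rlcg_flag)
{
	if (indirect) {
		*rlcg_flag = write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
		return true;
	}
	if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
		*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
		return true;
	}
	return false;
}

int main(void)
{
	uint32_t flag = 0;

	/* Indirect GC read selects the rlcg read command. */
	if (gc_rlcg_flag(true, 0, false, &flag))
		printf("indirect read -> flag 0x%08x\n", flag);

	/* Legacy path: AMDGPU_REGS_RLC set, AMDGPU_REGS_NO_KIQ clear. */
	if (gc_rlcg_flag(false, AMDGPU_REGS_RLC, true, &flag))
		printf("legacy write  -> flag 0x%08x\n", flag);

	return 0;
}
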
+
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+ struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+ uint32_t timeout = 50000;
+ uint32_t i, tmp;
+ uint32_t ret = 0;
+ void __iomem *scratch_reg0;
+ void __iomem *scratch_reg1;
+ void __iomem *scratch_reg2;
+ void __iomem *scratch_reg3;
+ void __iomem *spare_int;
+
+ if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+ dev_err(adev->dev,
+ "indirect registers access through rlcg is not available\n");
+ return 0;
+ }
+
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
+ scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+ scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+ if (reg_access_ctrl->spare_int)
+ spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+
+ if (offset == reg_access_ctrl->grbm_cntl) {
+ /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
+ writel(v, scratch_reg2);
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else if (offset == reg_access_ctrl->grbm_idx) {
+ /* if the target reg offset is grbm_idx, write to scratch_reg3 */
+ writel(v, scratch_reg3);
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ /*
+ * SCRATCH_REG0 = read/write value
+ * SCRATCH_REG1[30:28] = command
+ * SCRATCH_REG1[19:0] = address in dword
+ * SCRATCH_REG1[26:24] = Error reporting
+ */
+ writel(v, scratch_reg0);
+ writel((offset | flag), scratch_reg1);
+ if (reg_access_ctrl->spare_int)
+ writel(1, spare_int);
+
+ for (i = 0; i < timeout; i++) {
+ tmp = readl(scratch_reg1);
+ if (!(tmp & flag))
+ break;
+ udelay(10);
+ }
+
+ if (i >= timeout) {
+ if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
+ if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
+ dev_err(adev->dev,
+ "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
+ } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
+ dev_err(adev->dev,
+ "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
+ } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
+ dev_err(adev->dev,
+ "regiser is not in range, rlcg failed to program reg: 0x%05x\n", offset);
+ } else {
+ dev_err(adev->dev,
+ "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
+ }
+ } else {
+ dev_err(adev->dev,
+ "timeout: rlcg faled to program reg: 0x%05x\n", offset);
+ }
+ }
+ }
+
+ ret = readl(scratch_reg0);
+ return ret;
+}
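
The scratch-register handshake above is easiest to follow as a bit-layout exercise: the value goes into SCRATCH_REG0, and SCRATCH_REG1 carries the dword address in bits [19:0], the command in bits [30:28], and error status in bits [26:24], with the firmware clearing the command bits once it has consumed the request. The sketch below packs and checks a request word the same way the patch does (offset | flag); the field positions come from the comment in amdgpu_virt_rlcg_reg_rw(), and the helper names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Field layout per the SCRATCH_REG1 comment in amdgpu_virt_rlcg_reg_rw(). */
#define RLCG_CMD_SHIFT  28
#define RLCG_CMD_MASK   (0x7u << RLCG_CMD_SHIFT)
#define RLCG_ERR_SHIFT  24
#define RLCG_ERR_MASK   (0x7u << RLCG_ERR_SHIFT)
#define RLCG_ADDR_MASK  0xfffffu

/* Build the request word exactly as the patch does: writel(offset | flag). */
static uint32_t rlcg_pack(uint32_t offset, uint32_t cmd_flag)
{
	return (offset & RLCG_ADDR_MASK) | cmd_flag;
}

/* The poll loop above (50000 iterations x udelay(10), i.e. roughly 500 ms)
 * waits for the firmware to clear the command bits. */
static int rlcg_done(uint32_t scratch_reg1, uint32_t cmd_flag)
{
	return !(scratch_reg1 & cmd_flag);
}

int main(void)
{
	uint32_t cmd = 0x1u << RLCG_CMD_SHIFT;   /* placeholder command */
	uint32_t word = rlcg_pack(0x1a2b3, cmd);

	printf("request 0x%08x, done=%d, err=0x%x\n",
	       word, rlcg_done(word, cmd),
	       (word & RLCG_ERR_MASK) >> RLCG_ERR_SHIFT);
	return 0;
}
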
+
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ u32 offset, u32 value,
+ u32 acc_flags, u32 hwip)
+{
+ u32 rlcg_flag;
+
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
+ amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+ return;
+ }
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, value);
+ else
+ WREG32(offset, value);
+}
+
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ u32 offset, u32 acc_flags, u32 hwip)
+{
+ u32 rlcg_flag;
+
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
+ return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
+}
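
Finally, a note on usage: in the kernel these two entry points are normally reached through the SR-IOV register-access macros rather than called directly. The snippet below is only a hypothetical sketch of the calling convention; the wrapper function is invented for illustration, while AMDGPU_REGS_RLC and GC_HWIP are real identifiers from the driver:

/* Hypothetical caller, for illustration only: program a GC register,
 * letting amdgpu_sriov_wreg() decide between the rlcg scratch-register
 * interface and a direct MMIO write. */
static void example_program_gc_reg(struct amdgpu_device *adev,
				   u32 reg_offset, u32 val)
{
	/* hwip = GC_HWIP selects the GC branch of
	 * amdgpu_virt_get_rlcg_reg_access_flag(); AMDGPU_REGS_RLC marks
	 * the register as RLC-protected for the legacy path. */
	amdgpu_sriov_wreg(adev, reg_offset, val, AMDGPU_REGS_RLC, GC_HWIP);
}

Reads follow the same shape through amdgpu_sriov_rreg(), which returns the value read back from SCRATCH_REG0 when the rlcg path is taken.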