author     Alex Deucher <alexander.deucher@amd.com>    2015-04-20 17:09:27 -0400
committer  Alex Deucher <alexander.deucher@amd.com>    2015-04-29 17:08:19 -0400
commit     c2b173231df72ede6184fb1f1a14ac45bdad3701 (patch)
tree       bbbfcbba044ab445c4e882d7499205ebe6be7d97
parent     6547cb0c1669b1afacd0dab625f549649a2287ef (diff)
drm/amdgpu: Add support for CIK parts
This patch adds support for CIK parts. These parts are also supported
by radeon, which is the preferred option, so there is a config option
to enable support for CIK parts in amdgpu for testing.

Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat:
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile          |    3
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c   |   19
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c          | 6684
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.h          |  348
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_smc.c          |  279
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c             | 2505
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h             |   33
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_dpm.h         |   30
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c          |  453
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.h          |   29
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c        | 1422
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.h        |   29
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/clearstate_ci.h   |  944
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c        | 3830
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.h        |   29
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c        | 5635
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h        |   37
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c        | 1307
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h        |   36
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c          | 3336
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.h          |  229
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_smc.c          |  219
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu7.h            |  170
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu7_discrete.h   |  514
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu7_fusion.h     |  300
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c        |  888
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h        |   29
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c        |  642
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.h        |   29

29 files changed, 30008 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 01276a592bc5..aec28866945f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -18,6 +18,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+
# add IH block
amdgpu-y += \
amdgpu_irq.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cd4bb90fa85c..548e0843d95a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -38,6 +38,9 @@
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "cik.h"
+#endif
#include "bif/bif_4_1_d.h"
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
@@ -1154,6 +1157,22 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
int i, r = -EINVAL;
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
+ adev->family = AMDGPU_FAMILY_CI;
+ else
+ adev->family = AMDGPU_FAMILY_KV;
+
+ r = cik_set_ip_blocks(adev);
+ if (r)
+ return r;
+ break;
+#endif
default:
/* FIXME: not supported yet */
return -EINVAL;
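
The cik_set_ip_blocks() called above is added by this patch in cik.c (that
hunk is not shown in this excerpt). As a rough sketch of the shape such a
function takes in the early amdgpu IP-block model; the table name and the
exact adev fields below are illustrative, not the patch's actual symbols:

    /* Hedged sketch: each ASIC points adev at a version-specific list of IP
     * blocks (common, GMC v7, IH, DPM, DCE v8, GFX v7, SDMA, UVD, VCE) that
     * amdgpu_early_init() then walks.
     */
    static int example_set_ip_blocks(struct amdgpu_device *adev)
    {
            switch (adev->asic_type) {
            case CHIP_BONAIRE:
                    adev->ip_blocks = example_bonaire_ip_blocks;  /* hypothetical table */
                    adev->num_ip_blocks = ARRAY_SIZE(example_bonaire_ip_blocks);
                    break;
            default:
                    return -EINVAL;
            }
            return 0;
    }
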
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
new file mode 100644
index 000000000000..5a9dad8e55c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -0,0 +1,6684 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_ucode.h"
+#include "cikd.h"
+#include "amdgpu_dpm.h"
+#include "ci_dpm.h"
+#include "gfx_v7_0.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+static const struct ci_pt_defaults defaults_hawaii_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
+ { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
+ { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
+};
+
+static const struct ci_pt_defaults defaults_saturn_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_saturn_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
+ { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
+ { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
+};
+
+static const struct ci_pt_config_reg didt_config_ci[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
+{
+ return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+ u32 arb_freq_src, u32 arb_freq_dest)
+{
+ u32 mc_arb_dram_timing;
+ u32 mc_arb_dram_timing2;
+ u32 burst_time;
+ u32 mc_cg_config;
+
+ switch (arb_freq_src) {
+ case MC_CG_ARB_FREQ_F0:
+ mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
+ mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
+ burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
+ MC_ARB_BURST_TIME__STATE0__SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
+ mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
+ burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
+ MC_ARB_BURST_TIME__STATE1__SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (arb_freq_dest) {
+ case MC_CG_ARB_FREQ_F0:
+ WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+ WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+ WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
+ ~MC_ARB_BURST_TIME__STATE0_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+ WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+ WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
+ ~MC_ARB_BURST_TIME__STATE1_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
+ WREG32(mmMC_CG_CONFIG, mc_cg_config);
+ WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
+ ~MC_ARB_CG__CG_ARB_REQ_MASK);
+
+ return 0;
+}
+
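ci_copy_and_switch_arb_sets() snapshots the DRAM timing and burst-time
fields of one memory-arbiter register set, replays them into another, and
then points the arbiter at the destination set via MC_ARB_CG. A typical
use, sketched below with a hypothetical wrapper name, is seeding set F1
from the firmware-programmed set F0 before F1 is retimed for a new memory
clock:

    /* Sketch: copy arbiter set F0 (boot defaults) into F1 so the DPM code
     * can retarget F1 without disturbing the set currently in use.
     */
    static int example_switch_arb_f0_to_f1(struct amdgpu_device *adev)
    {
            return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0,
                                               MC_CG_ARB_FREQ_F1);
    }
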
+static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+ u8 mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+ return mc_para_index;
+}
+
+static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+ u8 mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 60000) / 5000);
+ }
+ return mc_para_index;
+}
+
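Both ratio helpers quantize a memory clock (in the driver's 10 kHz clock
units) into a 4-bit MC parameter index; a few worked values:

    /* Worked examples, clocks in 10 kHz units (40000 == 400 MHz):
     *
     *   ci_get_ddr3_mclk_frequency_ratio(40000)
     *       = (40000 - 10000) / 5000 + 1 = 7
     *
     *   ci_get_mclk_frequency_ratio(100000, false)    1 GHz, non-strobe
     *       = (100000 - 60000) / 5000 = 8
     *
     *   ci_get_mclk_frequency_ratio(30000, true)      300 MHz, strobe mode
     *       = (30000 - 10000) / 2500 = 8
     *
     * Clocks outside each window clamp to 0x00 or 0x0f, as in the code above.
     */
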
+static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table)
+{
+ unsigned int i, diff;
+
+ if (voltage_table->count <= max_voltage_steps)
+ return;
+
+ diff = voltage_table->count - max_voltage_steps;
+
+ for (i = 0; i < max_voltage_steps; i++)
+ voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+ voltage_table->count = max_voltage_steps;
+}
+
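The trim keeps the top of the voltage table: the lowest-voltage entries are
shifted out and the highest max_voltage_steps entries survive. A short
trace with hypothetical numbers:

    /* count = 10, max_voltage_steps = 8  ->  diff = 2
     *   entries[0] = entries[2], entries[1] = entries[3], ...,
     *   entries[7] = entries[9]
     *   count = 8
     * The two lowest-voltage steps are discarded.
     */
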
+static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
+static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
+static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
+ u32 target_tdp);
+static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
+static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 parameter);
+static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+
+static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
+{
+ struct ci_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ switch (adev->pdev->device) {
+ case 0x6649:
+ case 0x6650:
+ case 0x6651:
+ case 0x6658:
+ case 0x665C:
+ case 0x665D:
+ default:
+ pi->powertune_defaults = &defaults_bonaire_xt;
+ break;
+ case 0x6640:
+ case 0x6641:
+ case 0x6646:
+ case 0x6647:
+ pi->powertune_defaults = &defaults_saturn_xt;
+ break;
+ case 0x67B8:
+ case 0x67B0:
+ pi->powertune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x67BA:
+ case 0x67B1:
+ pi->powertune_defaults = &defaults_hawaii_pro;
+ break;
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
+ pi->powertune_defaults = &defaults_bonaire_xt;
+ break;
+ }
+
+ pi->dte_tj_offset = 0;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = false;
+ pi->caps_sq_ramping = false;
+ pi->caps_db_ramping = false;
+ pi->caps_td_ramping = false;
+ pi->caps_tcp_ramping = false;
+
+ if (pi->caps_power_containment) {
+ pi->caps_cac = true;
+ if (adev->asic_type == CHIP_HAWAII)
+ pi->enable_bapm_feature = false;
+ else
+ pi->enable_bapm_feature = true;
+ pi->enable_tdc_limit_feature = true;
+ pi->enable_pkg_pwr_tracking_feature = true;
+ }
+}
+
+static u8 ci_convert_to_vid(u16 vddc)
+{
+ return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
+}
+
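ci_convert_to_vid() maps a VDDC value in millivolts to a VID byte; with
VOLTAGE_SCALE = 4 the formula amounts to an encoding where VID 0 is
1550 mV and each VID step is 6.25 mV. Worked values:

    /* ci_convert_to_vid(1000) = (6200 - 1000 * 4) / 25 = 2200 / 25 = 88
     *   check: 1550 mV - 88 * 6.25 mV = 1000 mV
     * ci_convert_to_vid(1550) = (6200 - 6200) / 25 = 0
     *   (1550 mV is the highest voltage the encoding can express)
     */
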
+static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
+ u32 i;
+
+ if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
+ return -EINVAL;
+ if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
+ return -EINVAL;
+ if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
+ adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
+ return -EINVAL;
+
+ for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
+ hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
+ hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
+ } else {
+ lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
+ hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
+ }
+ }
+ return 0;
+}
+
+static int ci_populate_vddc_vid(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u8 *vid = pi->smc_powertune_table.VddCVid;
+ u32 i;
+
+ if (pi->vddc_voltage_table.count > 8)
+ return -EINVAL;
+
+ for (i = 0; i < pi->vddc_voltage_table.count; i++)
+ vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_populate_svi_load_line(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+
+ pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
+ pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
+ pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
+ pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int ci_populate_tdc_limit(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ u16 tdc_limit;
+
+ tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
+ pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
+ pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ pt_defaults->tdc_vddc_throttle_release_limit_perc;
+ pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int ci_populate_dw8(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ int ret;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable) +
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+ (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
+ pi->sram_end);
+ if (ret)
+ return -EINVAL;
+ else
+ pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+ (adev->pm.dpm.fan.fan_output_sensitivity == 0))
+ adev->pm.dpm.fan.fan_output_sensitivity =
+ adev->pm.dpm.fan.default_fan_output_sensitivity;
+
+ pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+ cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
+ }
+
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
+ pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
+
+ return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
+ u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
+ struct amdgpu_cac_tdp_table *cac_tdp_table =
+ adev->pm.dpm.dyn_state.cac_tdp_table;
+
+ hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
+ lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
+
+ pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
+ pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
+
+ return 0;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
+ struct amdgpu_cac_tdp_table *cac_tdp_table =
+ adev->pm.dpm.dyn_state.cac_tdp_table;
+ struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+ int i, j, k;
+ const u16 *def1;
+ const u16 *def2;
+
+ dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
+ dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
+
+ dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
+ dpm_table->GpuTjMax =
+ (u8)(pi->thermal_temp_setting.temperature_high / 1000);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
+ } else {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
+ }
+
+ dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
+ def1 = pt_defaults->bapmti_r;
+ def2 = pt_defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_pm_base(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 pm_fuse_table_offset;
+ int ret;
+
+ if (pi->caps_power_containment) {
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, pi->sram_end);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_vid_sidd(adev);
+ if (ret)
+ return ret;
+ ret = ci_populate_vddc_vid(adev);
+ if (ret)
+ return ret;
+ ret = ci_populate_svi_load_line(adev);
+ if (ret)
+ return ret;
+ ret = ci_populate_tdc_limit(adev);
+ if (ret)
+ return ret;
+ ret = ci_populate_dw8(adev);
+ if (ret)
+ return ret;
+ ret = ci_populate_fuzzy_fan(adev);
+ if (ret)
+ return ret;
+ ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
+ if (ret)
+ return ret;
+ ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
+ (u8 *)&pi->smc_powertune_table,
+ sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(ixDIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(ixDIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
+ }
+}
+
+static int ci_program_pt_config_registers(struct amdgpu_device *adev,
+ const struct ci_pt_config_reg *cac_config_regs)
+{
+ const struct ci_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset, data);
+ break;
+ }
+ cache = 0;
+ }
+ config_regs++;
+ }
+ return 0;
+}
+
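Each didt_config_ci[] entry is (offset, mask, shift, value, type), and for
the DIDT_IND type the loop above performs a read-modify-write through the
indirect DIDT register space. The table entry
{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND } therefore expands
to roughly:

    /* Expansion of one CISLANDS_CONFIGREG_DIDT_IND entry (no preceding
     * CISLANDS_CONFIGREG_CACHE entries in this table, so cache == 0):
     */
    data  = RREG32_DIDT(0x2);                /* read current value          */
    data &= ~0x00003fff;                     /* clear the 14-bit field      */
    data |= (0x4 << 0) & 0x00003fff;         /* install the new field value */
    WREG32_DIDT(0x2, data);                  /* write back                  */
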
+static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret;
+
+ if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
+ gfx_v7_0_enter_rlc_safe_mode(adev);
+
+ if (enable) {
+ ret = ci_program_pt_config_registers(adev, didt_config_ci);
+ if (ret) {
+ gfx_v7_0_exit_rlc_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ ci_do_enable_didt(adev, enable);
+
+ gfx_v7_0_exit_rlc_safe_mode(adev);
+ }
+
+ return 0;
+}
+
+static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (enable) {
+ pi->power_containment_features = 0;
+ if (pi->caps_power_containment) {
+ if (pi->enable_bapm_feature) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+ }
+
+ if (pi->enable_tdc_limit_feature) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
+ }
+
+ if (pi->enable_pkg_pwr_tracking_feature) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ } else {
+ struct amdgpu_cac_tdp_table *cac_tdp_table =
+ adev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 default_pwr_limit =
+ (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+ ci_set_power_limit(adev, default_pwr_limit);
+ }
+ }
+ }
+ } else {
+ if (pi->caps_power_containment && pi->power_containment_features) {
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
+ pi->power_containment_features = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ pi->cac_enabled = false;
+ } else {
+ pi->cac_enabled = true;
+ }
+ } else if (pi->cac_enabled) {
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result = PPSMC_Result_OK;
+
+ if (pi->thermal_sclk_dpm_enabled) {
+ if (enable)
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+ else
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+ }
+
+ if (smc_result == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int ci_power_control_set_level(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_cac_tdp_table *cac_tdp_table =
+ adev->pm.dpm.dyn_state.cac_tdp_table;
+ s32 adjust_percent;
+ s32 target_tdp;
+ int ret = 0;
+ bool adjust_polarity = false; /* ??? */
+
+ if (pi->caps_power_containment) {
+ adjust_percent = adjust_polarity ?
+ adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
+ target_tdp = ((100 + adjust_percent) *
+ (s32)cac_tdp_table->configurable_tdp) / 100;
+
+ ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
+ }
+
+ return ret;
+}
+
+static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ ci_update_uvd_dpm(adev, gate);
+}
+
+static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+ u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 switch_limit = adev->mc.is_gddr5 ? 450 : 300;
+
+ if (vblank_time < switch_limit)
+ return true;
+ else
+ return false;
+
+}
+
+static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_clock_and_voltage_limits *max_limits;
+ bool disable_mclk_switching;
+ u32 sclk, mclk;
+ int i;
+
+ if (rps->vce_active) {
+ rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+ rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+ } else {
+ rps->evclk = 0;
+ rps->ecclk = 0;
+ }
+
+ if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ ci_dpm_vblank_too_short(adev))
+ disable_mclk_switching = true;
+ else
+ disable_mclk_switching = false;
+
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (adev->pm.dpm.ac_power == false) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+ if (ps->performance_levels[i].sclk > max_limits->sclk)
+ ps->performance_levels[i].sclk = max_limits->sclk;
+ }
+ }
+
+ /* XXX validate the min clocks required for display */
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ } else {
+ mclk = ps->performance_levels[0].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ }
+
+ if (rps->vce_active) {
+ if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+ sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+ if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+ mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+ }
+
+ ps->performance_levels[0].sclk = sclk;
+ ps->performance_levels[0].mclk = mclk;
+
+ if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
+ ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
+
+ if (disable_mclk_switching) {
+ if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
+ ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
+ } else {
+ if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
+ ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
+ }
+}
+
+static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(ixCG_THERMAL_INT);
+ tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+ tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
+ ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
+ WREG32_SMC(ixCG_THERMAL_INT, tmp);
+
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
+ tmp &= DIG_THERM_DPM_MASK;
+ tmp |= DIG_THERM_DPM(high_temp / 1000);
+ WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
+#endif
+
+ adev->pm.dpm.thermal.min_temp = low_temp;
+ adev->pm.dpm.thermal.max_temp = high_temp;
+ return 0;
+}
+
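The driver calls this with CISLANDS_TEMP_RANGE_MIN/MAX (see
ci_thermal_start_thermal_controller() below). Using illustrative values of
90000 and 120000 millidegrees for that range:

    /* min_temp = 90000, max_temp = 120000 (both within 0..255000):
     *   low_temp = 90000, high_temp = 120000
     *   DIG_THERM_INTH field <- 120000 / 1000 = 120
     *   DIG_THERM_INTL field <- 90000 / 1000  =  90
     * adev->pm.dpm.thermal.{min,max}_temp become 90000 and 120000.
     */
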
+static int ci_thermal_enable_alert(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+ PPSMC_Result result;
+
+ if (enable) {
+ thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
+ CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
+ WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
+ result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
+ CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
+ result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+
+ if (pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
+ >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
+ >> CG_FDO_CTRL2__TMIN__SHIFT;
+ pi->t_min = tmp;
+ pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+
+ tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!pi->fan_table_start) {
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
+ >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+
+ if (duty100 == 0) {
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
+ t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+ t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = amdgpu_asic_get_xclk(adev);
+
+ fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
+ >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
+ fan_table.TempSrc = (uint8_t)tmp;
+
+ ret = amdgpu_ci_copy_bytes_to_smc(adev,
+ pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return 0;
+}
+
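The slope math converts the fan curve's two segments into fixed-point
slopes for the SMC; the temperatures here are in 0.01 degC units and the
PWM values in 0.01 % units. With hypothetical curve values:

    /* duty100 = 100,
     * t_diff1   = t_med - t_min     = 3000   (a 30.00 degC span)
     * pwm_diff1 = pwm_med - pwm_min = 3000   (a 30.00 % span)
     *
     *   slope1 = (50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100
     *          = (50 + (16 * 100 * 3000) / 3000) / 100
     *          = (50 + 1600) / 100 = 16
     *
     * The +50 terms round to nearest; the factor of 16 scales the slope
     * into the fixed-point format the SMC firmware consumes.
     */
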
+static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result ret;
+
+ if (pi->caps_od_fuzzy_fan_control_support) {
+ ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_StartFanControl,
+ FAN_CONTROL_FUZZY);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SetFanPwmMax,
+ adev->pm.dpm.fan.default_max_fan_pwm);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_StartFanControl,
+ FAN_CONTROL_TABLE);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ pi->fan_is_controlled_by_smc = true;
+ return 0;
+}
+
+
+static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+ PPSMC_Result ret;
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
+ if (ret == PPSMC_Result_OK) {
+ pi->fan_is_controlled_by_smc = false;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
+ >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
+ >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+ u32 speed)
+{
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (pi->fan_is_controlled_by_smc)
+ return -EINVAL;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
+ >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+ tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+ WREG32_SMC(ixCG_FDO_CTRL0, tmp);
+
+ return 0;
+}
+
+static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(adev);
+ ci_fan_ctrl_set_static_mode(adev, mode);
+ } else {
+ /* restart auto-manage */
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ ci_thermal_start_smc_fan_control(adev);
+ else
+ ci_fan_ctrl_set_default_mode(adev);
+ }
+}
+
+static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+
+ if (pi->fan_is_controlled_by_smc)
+ return 0;
+
+ tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
+}
+
+#if 0
+static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (adev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
+ >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (adev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < adev->pm.fan_min_rpm) ||
+ (speed > adev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(adev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+ tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+	WREG32_SMC(ixCG_TACH_CTRL, tmp);
+
+ ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+
+ if (!pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+
+ tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+ pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm.fan.ucode_fan_control) {
+ ci_fan_ctrl_start_smc_fan_control(adev);
+ ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void ci_thermal_initialize(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+ tmp |= (adev->pm.fan_pulses_per_revolution - 1)
+ << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+ WREG32_SMC(ixCG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+ tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+ WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ci_thermal_initialize(adev);
+ ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = ci_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+ if (adev->pm.dpm.fan.ucode_fan_control) {
+ ret = ci_thermal_setup_fan_table(adev);
+ if (ret)
+ return ret;
+ ci_thermal_start_smc_fan_control(adev);
+ }
+
+ return 0;
+}
+
+static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+ if (!adev->pm.no_fan)
+ ci_fan_ctrl_set_default_mode(adev);
+}
+
+#if 0
+static int ci_read_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 *value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ return amdgpu_ci_read_smc_sram_dword(adev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static int ci_write_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ return amdgpu_ci_write_smc_sram_dword(adev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+
+static void ci_init_fps_limits(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ table->FpsHighT = cpu_to_be16(tmp);
+
+ tmp = 30;
+ table->FpsLowT = cpu_to_be16(tmp);
+ }
+}
+
+static int ci_update_sclk_t(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret = 0;
+ u32 low_sclk_interrupt_t = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = amdgpu_ci_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+
+ }
+
+ return ret;
+}
+
+static void ci_get_leakage_voltages(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u16 leakage_id, virtual_voltage_id;
+ u16 vddc, vddci;
+ int i;
+
+ pi->vddc_leakage.count = 0;
+ pi->vddci_leakage.count = 0;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
+ continue;
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ }
+ } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
+ virtual_voltage_id,
+ leakage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
+ pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
+ pi->vddci_leakage.count++;
+ }
+ }
+ }
+ }
+}
+
+static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ bool want_thermal_protection;
+ enum amdgpu_dpm_event_src dpm_event_src;
+ u32 tmp;
+
+ switch (sources) {
+ case 0:
+ default:
+ want_thermal_protection = false;
+ break;
+ case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ break;
+ }
+
+ if (want_thermal_protection) {
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
+ tmp &= DPM_EVENT_SRC_MASK;
+ tmp |= DPM_EVENT_SRC(dpm_event_src);
+ WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
+#endif
+
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+ if (pi->thermal_protection)
+ tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+ else
+ tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+ } else {
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+ tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
+ enum amdgpu_dpm_auto_throttle_src source,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (enable) {
+ if (!(pi->active_auto_throttle_sources & (1 << source))) {
+ pi->active_auto_throttle_sources |= 1 << source;
+ ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+ }
+ } else {
+ if (pi->active_auto_throttle_sources & (1 << source)) {
+ pi->active_auto_throttle_sources &= ~(1 << source);
+ ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+ }
+ }
+}
+
+static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
+}
+
+static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ pi->need_update_smu7_dpm_table = 0;
+ return 0;
+}
+
+static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+
+ if (enable) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
+ ~MC_SEQ_CNTL_3__CAC_EN_MASK);
+
+ WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
+ WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
+ WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
+
+ udelay(10);
+
+ WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
+ WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
+ WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
+ }
+ } else {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ci_start_dpm(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+ tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+
+ tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+ tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+ ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
+
+ WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
+
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ ret = ci_enable_sclk_mclk_dpm(adev, true);
+ if (ret)
+ return ret;
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_stop_dpm(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+ tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+
+ tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+ tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ ret = ci_enable_sclk_mclk_dpm(adev, false);
+ if (ret)
+ return ret;
+
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+
+ if (enable)
+ tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
+ else
+ tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+}
+
+#if 0
+static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
+ bool ac_power)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_cac_tdp_table *cac_tdp_table =
+ adev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 power_limit;
+
+ if (ac_power)
+ power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+ else
+ power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
+
+ ci_set_power_limit(adev, power_limit);
+
+ if (pi->caps_automatic_dc_transition) {
+ if (ac_power)
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
+ else
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
+ }
+
+ return 0;
+}
+#endif
+
+static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(mmSMC_MSG_ARG_0, parameter);
+ return amdgpu_ci_send_msg_to_smc(adev, msg);
+}
+
+static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 *parameter)
+{
+ PPSMC_Result smc_result;
+
+ smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
+
+ if ((smc_result == PPSMC_Result_OK) && parameter)
+ *parameter = RREG32(mmSMC_MSG_ARG_0);
+
+ return smc_result;
+}
+
+static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (!pi->sclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (!pi->mclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
+ u32 target_tdp)
+{
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ return 0;
+}
+
+#if 0
+static int ci_set_boot_state(struct amdgpu_device *adev)
+{
+ return ci_enable_sclk_mclk_dpm(adev, false);
+}
+#endif
+
+static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
+{
+ u32 sclk_freq;
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_return_parameter(adev,
+ PPSMC_MSG_API_GetSclkFrequency,
+ &sclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ sclk_freq = 0;
+
+ return sclk_freq;
+}
+
+static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
+{
+ u32 mclk_freq;
+ PPSMC_Result smc_result =
+ amdgpu_ci_send_msg_to_smc_return_parameter(adev,
+ PPSMC_MSG_API_GetMclkFrequency,
+ &mclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ mclk_freq = 0;
+
+ return mclk_freq;
+}
+
+static void ci_dpm_start_smc(struct amdgpu_device *adev)
+{
+ int i;
+
+ amdgpu_ci_program_jump_on_start(adev);
+ amdgpu_ci_start_smc_clock(adev);
+ amdgpu_ci_start_smc(adev);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+ break;
+ }
+}
+
+static void ci_dpm_stop_smc(struct amdgpu_device *adev)
+{
+ amdgpu_ci_reset_smc(adev);
+ amdgpu_ci_stop_smc_clock(adev);
+}
+
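+/*
+ * The SMC firmware publishes a header at a fixed SRAM offset
+ * (SMU7_FIRMWARE_HEADER_LOCATION); each field gives the SRAM address
+ * of a table the driver fills in later (DPM table, soft registers,
+ * MC register table, fan table, MC arb timing table).
+ */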
+static int ci_process_firmware_header(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->dpm_table_start = tmp;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->soft_regs_start = tmp;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->mc_reg_table_start = tmp;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->fan_table_start = tmp;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->arb_table_start = tmp;
+
+ return 0;
+}
+
+static void ci_read_clock_registers(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ pi->clock_registers.cg_spll_func_cntl =
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
+ pi->clock_registers.cg_spll_func_cntl_2 =
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
+ pi->clock_registers.cg_spll_func_cntl_3 =
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
+ pi->clock_registers.cg_spll_func_cntl_4 =
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
+ pi->clock_registers.cg_spll_spread_spectrum =
+ RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
+ pi->clock_registers.cg_spll_spread_spectrum_2 =
+ RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
+ pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
+ pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
+ pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
+ pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
+ pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
+ pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
+ pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
+}
+
+static void ci_init_sclk_t(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static void ci_enable_thermal_protection(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+ if (enable)
+ tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+ else
+ tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+ tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
+
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+#if 0
+static int ci_enter_ulp_state(struct amdgpu_device *adev)
+{
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+	/* 25 ms is too long for udelay(); use mdelay() instead */
+	mdelay(25);
+
+ return 0;
+}
+
+static int ci_exit_ulp_state(struct amdgpu_device *adev)
+{
+ int i;
+
+ WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+	mdelay(7);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(mmSMC_RESP_0) == 1)
+ break;
+ udelay(1000);
+ }
+
+ return 0;
+}
+#endif
+
+static int ci_notify_smc_display_change(struct amdgpu_device *adev,
+ bool has_display)
+{
+ PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+ return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
+}
+
+static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (enable) {
+ if (pi->caps_sclk_ds) {
+ if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ } else {
+ if (pi->caps_sclk_ds) {
+ if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
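+/*
+ * Program the display gap so the SMC only reclocks memory during the
+ * vblank window (or ignores displays when none are active).  The
+ * pre-vblank time is converted from microseconds into SMC counter
+ * units; ref_clock is presumably in 10 kHz units, so ref_clock / 100
+ * is the number of ticks per microsecond.
+ */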
+static void ci_program_display_gap(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
+ u32 pre_vbi_time_in_us;
+ u32 frame_time_in_us;
+ u32 ref_clock = adev->clock.spll.reference_freq;
+ u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
+ u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+
+ tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
+ if (adev->pm.dpm.new_active_crtc_count > 0)
+ tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
+ else
+ tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
+ WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
+
+ if (refresh_rate == 0)
+ refresh_rate = 60;
+ if (vblank_time == 0xffffffff)
+ vblank_time = 500;
+ frame_time_in_us = 1000000 / refresh_rate;
+	pre_vbi_time_in_us = frame_time_in_us - 200 - vblank_time;
+ tmp = pre_vbi_time_in_us * (ref_clock / 100);
+
+ WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
+ ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
+	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
+}
+
+static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+
+ if (enable) {
+ if (pi->caps_sclk_ss_support) {
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+ tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+ }
+ } else {
+ tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
+ tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
+ WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
+
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+ tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_program_sstp(struct amdgpu_device *adev)
+{
+ WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
+ ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
+ (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
+}
+
+static void ci_enable_display_gap(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
+
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
+ CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
+ tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
+ (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
+
+ WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void ci_program_vc(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+ tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
+}
+
+static void ci_clear_vc(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+ tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
+}
+
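+/*
+ * Wait for the SMC boot sequence to complete, then halt the SMC and
+ * load the microcode into its SRAM; the SMC is restarted later via
+ * ci_dpm_start_smc().
+ */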
+static int ci_upload_firmware(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int i, ret;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
+ break;
+ }
+ WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
+
+ amdgpu_ci_stop_smc_clock(adev);
+ amdgpu_ci_reset_smc(adev);
+
+ ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+
+	return ret;
+}
+
+static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
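+/*
+ * Build the VDDC/VDDCI/MVDD voltage tables, either from the GPIO
+ * lookup table in the vbios or from the SVI2 clock dependency tables,
+ * then trim each to the number of levels the SMU can represent.
+ */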
+static int ci_construct_voltage_tables(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret;
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
+ ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
+ &pi->vddc_voltage_table);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
+ ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
+ &pi->vddci_voltage_table);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
+ ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
+ &pi->mvdd_voltage_table);
+
+ return 0;
+}
+
+static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
+ struct atom_voltage_table_entry *voltage_table,
+ SMU7_Discrete_VoltageLevel *smc_voltage_table)
+{
+ int ret;
+
+ ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
+ &smc_voltage_table->StdVoltageHiSidd,
+ &smc_voltage_table->StdVoltageLoSidd);
+
+ if (ret) {
+ smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
+ smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
+ smc_voltage_table->StdVoltageHiSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
+ smc_voltage_table->StdVoltageLoSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
+}
+
+static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ unsigned int count;
+
+ table->VddcLevelCount = pi->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ ci_populate_smc_voltage_table(adev,
+ &pi->vddc_voltage_table.entries[count],
+ &table->VddcLevel[count]);
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddcLevel[count].Smio |=
+ pi->vddc_voltage_table.entries[count].smio_low;
+ else
+ table->VddcLevel[count].Smio = 0;
+ }
+ table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ table->VddciLevelCount = pi->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ ci_populate_smc_voltage_table(adev,
+ &pi->vddci_voltage_table.entries[count],
+ &table->VddciLevel[count]);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddciLevel[count].Smio |=
+ pi->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio = 0;
+ }
+ table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ unsigned int count;
+
+ table->MvddLevelCount = pi->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ ci_populate_smc_voltage_table(adev,
+ &pi->mvdd_voltage_table.entries[count],
+ &table->MvddLevel[count]);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->MvddLevel[count].Smio |=
+ pi->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio = 0;
+ }
+ table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ int ret;
+
+ ret = ci_populate_smc_vddc_table(adev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vddci_table(adev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_mvdd_table(adev, table);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 i = 0;
+
+ if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
+ if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
+ voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+		if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
+			return -EINVAL;
+
+		/* found a suitable MVDD level for this mclk */
+		return 0;
+	}
+
+	/* no MVDD control; the caller falls back to MinMvdd = 0 */
+	return -EINVAL;
+}
+
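+/*
+ * Look up the "standard" hi/lo SIDD voltages for a VDDC value via the
+ * CAC leakage table; if no entry matches (exactly, or as the nearest
+ * level at or above), fall back to the raw voltage itself.
+ */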
+static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
+{
+ u16 v_index, idx;
+	bool voltage_found = false;
+
+	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
+ *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
+
+ if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+ return -EINVAL;
+
+ if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value ==
+ adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ if (!voltage_found) {
+ for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value <=
+ adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
+ const struct amdgpu_phase_shedding_limits_table *limits,
+ u32 sclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (sclk < limits->entries[i].sclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
+ const struct amdgpu_phase_shedding_limits_table *limits,
+ u32 mclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (mclk < limits->entries[i].mclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
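+/*
+ * The high byte of the first dword of the SMC arb table records the
+ * MC arb register set in use; initialize it to set F1.
+ */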
+static int ci_init_arb_table_index(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+ return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
+ tmp, pi->sram_end);
+}
+
+static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ u32 clock, u32 *voltage)
+{
+ u32 i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *voltage = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *voltage = allowed_clock_voltage_table->entries[i-1].v;
+
+ return 0;
+}
+
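+/*
+ * Pick the deep sleep divider ID: the largest power-of-two divider
+ * that keeps the divided sclk at or above the deep sleep floor
+ * (min_sclk_in_sr, clamped to CISLAND_MINIMUM_ENGINE_CLOCK).
+ */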
+static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ u32 i;
+ u32 tmp;
+ u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ tmp = sclk / (1 << i);
+ if (tmp >= min || i == 0)
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+ return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ci_reset_to_default(struct amdgpu_device *adev)
+{
+ return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
+					const u32 engine_clock,
+					const u32 memory_clock,
+					u32 *dram_timing2)
+{
+	bool patch;
+	u32 tmp, tmp2;
+
+	tmp = RREG32(mmMC_SEQ_MISC0);
+	patch = (tmp & 0x0000f00) == 0x300;
+
+	if (patch &&
+	    ((adev->pdev->device == 0x67B0) ||
+	     (adev->pdev->device == 0x67B1))) {
+		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+			*dram_timing2 &= ~0x00ff0000;
+			*dram_timing2 |= tmp2 << 16;
+		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+			*dram_timing2 &= ~0x00ff0000;
+			*dram_timing2 |= tmp2 << 16;
+		}
+	}
+}
+
+static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
+ u32 sclk,
+ u32 mclk,
+ SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ u32 dram_timing;
+ u32 dram_timing2;
+ u32 burst_time;
+
+ amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
+
+ dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
+ burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
+
+ ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
+
+ arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
+ arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
+ arb_regs->McArbBurstTime = (u8)burst_time;
+
+ return 0;
+}
+
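+/*
+ * Build one MC arb timing entry for every (sclk, mclk) pair in the
+ * DPM tables and upload the whole matrix to the SMC in a single copy.
+ */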
+static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
+ u32 i, j;
+ int ret = 0;
+
+ memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
+ ret = ci_populate_memory_timing_parameters(adev,
+ pi->dpm_table.sclk_table.dpm_levels[i].value,
+ pi->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret == 0)
+ ret = amdgpu_ci_copy_bytes_to_smc(adev,
+ pi->arb_table_start,
+ (u8 *)&arb_regs,
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
+ pi->sram_end);
+
+ return ret;
+}
+
+static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (pi->need_update_smu7_dpm_table == 0)
+ return 0;
+
+ return ci_do_program_memory_timing_parameters(adev);
+}
+
+static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_boot_state)
+{
+ struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 level = 0;
+
+ for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
+ if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
+ boot_state->performance_levels[0].sclk) {
+ pi->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
+ if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
+ boot_state->performance_levels[0].mclk) {
+ pi->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+}
+
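+/*
+ * Build a bitmask of the enabled levels of a DPM table, with bit i
+ * corresponding to level i.
+ */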
+static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
+{
+ u32 i;
+ u32 mask_value = 0;
+
+ for (i = dpm_table->count; i > 0; i--) {
+ mask_value = mask_value << 1;
+ if (dpm_table->dpm_levels[i-1].enabled)
+ mask_value |= 0x1;
+ else
+ mask_value &= 0xFFFFFFFE;
+ }
+
+ return mask_value;
+}
+
+static void ci_populate_smc_link_level(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 i;
+
+ for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = cpu_to_be32(5);
+ table->LinkLevel[i].UpT = cpu_to_be32(30);
+ }
+
+ pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+}
+
+static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->UvdLevelCount =
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].VclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].DclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
+
+ table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
+ table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
+ table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->VceLevelCount =
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->VceLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->VceLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
+ table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
+ }
+
+	return ret;
+}
+
+static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->AcpLevelCount = (u8)
+ (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
+ table->AcpLevel[count].MinVoltage =
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->AcpLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->AcpLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
+ table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->SamuLevelCount =
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency =
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
+ table->SamuLevel[count].MinVoltage =
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->SamuLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->SamuLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
+ table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
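+/*
+ * Compute the MPLL register values for one memory level: feedback and
+ * post dividers from the vbios, optional memory spread spectrum
+ * (CLKS step and CLKV delta), and the DLL speed/power-down bits.
+ */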
+static int ci_calculate_mclk_params(struct amdgpu_device *adev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dll_state_on)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
+ u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
+ u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
+ struct atom_mpll_param mpll_param;
+ int ret;
+
+ ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+ if (ret)
+ return ret;
+
+ mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
+ mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
+
+ mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
+ MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
+ mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
+ (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
+ (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
+
+ mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
+ mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
+
+ if (adev->mc.is_gddr5) {
+ mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
+ MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
+ mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
+ (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
+ }
+
+ if (pi->caps_mclk_ss_support) {
+ struct amdgpu_atom_ss ss;
+ u32 freq_nom;
+ u32 tmp;
+ u32 reference_clock = adev->clock.mpll.reference_freq;
+
+ if (mpll_param.qdr == 1)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
+
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+ if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ u32 clks = reference_clock * 5 / ss.rate;
+ u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
+ mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
+
+ mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
+ mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
+ }
+ }
+
+ mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
+ mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
+
+ if (dll_state_on)
+ mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
+ MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
+ else
+ mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
+ MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
+
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static int ci_populate_single_memory_level(struct amdgpu_device *adev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *memory_level)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret;
+ bool dll_state_on;
+
+ if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddc);
+ if (ret)
+ return ret;
+ }
+
+ if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddci);
+ if (ret)
+ return ret;
+ }
+
+ if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(adev,
+ &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ memory_clock, &memory_level->MinMvdd);
+ if (ret)
+ return ret;
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_mclk(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ memory_clock,
+ &memory_level->MinVddcPhases);
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
+ memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
+
+ memory_level->StutterEnable = false;
+ memory_level->StrobeEnable = false;
+ memory_level->EdcReadEnable = false;
+ memory_level->EdcWriteEnable = false;
+ memory_level->RttEnable = false;
+
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (pi->mclk_stutter_mode_threshold &&
+ (memory_clock <= pi->mclk_stutter_mode_threshold) &&
+	    !pi->uvd_enabled &&
+ (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
+ (adev->pm.dpm.new_active_crtc_count <= 2))
+ memory_level->StutterEnable = true;
+
+ if (pi->mclk_strobe_mode_threshold &&
+ (memory_clock <= pi->mclk_strobe_mode_threshold))
+ memory_level->StrobeEnable = 1;
+
+ if (adev->mc.is_gddr5) {
+ memory_level->StrobeRatio =
+ ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
+ if (pi->mclk_edc_enable_threshold &&
+ (memory_clock > pi->mclk_edc_enable_threshold))
+ memory_level->EdcReadEnable = true;
+
+ if (pi->mclk_edc_wr_enable_threshold &&
+ (memory_clock > pi->mclk_edc_wr_enable_threshold))
+ memory_level->EdcWriteEnable = true;
+
+		if (memory_level->StrobeEnable) {
+			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
+			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
+			else
+				dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
+		} else {
+			dll_state_on = pi->dll_default_on;
+		}
+	} else {
+		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
+		dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
+	}
+
+ ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+ if (ret)
+ return ret;
+
+ memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
+ memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
+ memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+
+ memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
+ memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
+ memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
+ memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
+ memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
+ memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
+ memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
+ memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
+ memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
+ memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
+ memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
+
+ return 0;
+}
+
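+/*
+ * Populate the ACPI (lowest power) level: the SPLL is powered down
+ * and held in reset with sclk taken from the reference clock, and the
+ * memory DLLs are reset and powered down.
+ */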
+static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct atom_clock_dividers dividers;
+ SMU7_Discrete_VoltageLevel voltage_level;
+ u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ int ret;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (pi->acpi_vddc)
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
+
+ table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ table->ACPILevel.SclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->ACPILevel.SclkDid = (u8)dividers.post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
+ spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
+ table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
+ table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
+ table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
+ table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
+ table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
+ table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
+ table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
+ table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
+ table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
+ table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
+ table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
+ table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
+
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ if (pi->acpi_vddci)
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
+ }
+
+ if (ci_populate_mvdd_value(adev, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd = 0;
+ else
+ table->MemoryACPILevel.MinMvdd =
+ cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
+
+ mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
+ MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
+ mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
+ MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
+
+ dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
+
+ table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
+ table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
+ table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ cpu_to_be16((u16)pi->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ table->MemoryACPILevel.StrobeEnable = false;
+ table->MemoryACPILevel.EdcReadEnable = false;
+ table->MemoryACPILevel.EdcWriteEnable = false;
+ table->MemoryACPILevel.RttEnable = false;
+
+ return 0;
+}
+
+static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+
+ if (ulv->supported) {
+ if (enable)
+ return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ else
+ return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+
+ return 0;
+}
+
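+/*
+ * The power tables reuse the backbias_response_time field to carry
+ * the ULV target voltage; express it as an offset (or, for SVI2, a
+ * VID offset) below the lowest sclk VDDC level.
+ */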
+static int ci_populate_ulv_level(struct amdgpu_device *adev,
+ SMU7_Discrete_Ulv *state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ if (ulv_voltage == 0) {
+ pi->ulv.supported = false;
+ return 0;
+ }
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffset = 0;
+ else
+ state->VddcOffset =
+ adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
+ } else {
+ if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffsetVid = 0;
+ else
+ state->VddcOffsetVid = (u8)
+ ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
+
+ state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
+ state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
+ state->VddcOffset = cpu_to_be16(state->VddcOffset);
+
+ return 0;
+}
+
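+/*
+ * Compute the SPLL register values for one engine clock level: the
+ * fractional feedback divider, plus the spread spectrum step (CLKS)
+ * and delta (CLKV) when engine SS is supported.
+ */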
+static int ci_calculate_sclk_params(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SMU7_Discrete_GraphicsLevel *sclk)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct atom_clock_dividers dividers;
+ u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+ u32 reference_divider;
+ u32 fbdiv;
+ int ret;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ engine_clock, false, &dividers);
+ if (ret)
+ return ret;
+
+ reference_divider = 1 + dividers.ref_div;
+ fbdiv = dividers.fb_div & 0x3FFFFFF;
+
+ spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
+ spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
+
+ if (pi->caps_sclk_ss_support) {
+ struct amdgpu_atom_ss ss;
+ u32 vco_freq = engine_clock * dividers.post_div;
+
+ if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+ u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+ u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+ cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
+ cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
+
+ cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
+ cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (u8)dividers.post_divider;
+
+ return 0;
+}
+
+static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
+ u32 engine_clock,
+ u16 sclk_activity_level_t,
+ SMU7_Discrete_GraphicsLevel *graphic_level)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret;
+
+ ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
+ if (ret)
+ return ret;
+
+ ret = ci_get_dependency_volt_by_clk(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ engine_clock, &graphic_level->MinVddc);
+ if (ret)
+ return ret;
+
+ graphic_level->SclkFrequency = engine_clock;
+
+ graphic_level->Flags = 0;
+ graphic_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ graphic_level->ActivityLevel = sclk_activity_level_t;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpH = 0;
+ graphic_level->DownH = 0;
+ graphic_level->VoltageDownH = 0;
+ graphic_level->PowerThrottle = 0;
+
+ if (pi->caps_sclk_ds)
+ graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
+ engine_clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
+ graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+ graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
+ graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
+ graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
+ graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
+ graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
+ graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
+ graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
+ graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
+ graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
+ graphic_level->EnabledForActivity = 1;
+
+ return 0;
+}
+
+static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
+	u32 i;
+	int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ ret = ci_populate_single_graphic_level(adev,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (u16)pi->activity_target[i],
+ &pi->smc_state_table.GraphicsLevel[i]);
+ if (ret)
+ return ret;
+ if (i > 1)
+ pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ if (i == (dpm_table->sclk_table.count - 1))
+ pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_ulv_state(struct amdgpu_device *adev,
+ SMU7_Discrete_Ulv *ulv_level)
+{
+ return ci_populate_ulv_level(adev, ulv_level);
+}
+
+static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
+ SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
+	u32 i;
+	int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ if (dpm_table->mclk_table.dpm_levels[i].value == 0)
+ return -EINVAL;
+ ret = ci_populate_single_memory_level(adev,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &pi->smc_state_table.MemoryLevel[i]);
+ if (ret)
+ return ret;
+ }
+
+ if ((dpm_table->mclk_table.count >= 2) &&
+ ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
+ pi->smc_state_table.MemoryLevel[1].MinVddc =
+ pi->smc_state_table.MemoryLevel[0].MinVddc;
+ pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+ pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+ }
+
+ pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
+
+ pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
+				      struct ci_single_dpm_table *dpm_table,
+ u32 count)
+{
+ u32 i;
+
+ dpm_table->count = count;
+ for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
+ dpm_table->dpm_levels[i].enabled = false;
+}
+
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
+ u32 index, u32 pcie_gen, u32 pcie_lanes)
+{
+ dpm_table->dpm_levels[index].value = pcie_gen;
+ dpm_table->dpm_levels[index].param1 = pcie_lanes;
+ dpm_table->dpm_levels[index].enabled = true;
+}
+
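+/*
+ * The PCIe DPM table always carries six entries: levels 0/2/4 are
+ * built from the powersaving gen/lane limits and levels 1/3/5 from
+ * the performance ones (Bonaire widens level 0 to the maximum
+ * powersaving lane count).
+ */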
+static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
+ return -EINVAL;
+
+ if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_powersaving = pi->pcie_gen_performance;
+ pi->pcie_lane_powersaving = pi->pcie_lane_performance;
+ } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_performance = pi->pcie_gen_powersaving;
+ pi->pcie_lane_performance = pi->pcie_lane_powersaving;
+ }
+
+ ci_reset_single_dpm_table(adev,
+ &pi->dpm_table.pcie_speed_table,
+ SMU7_MAX_LEVELS_LINK);
+
+ if (adev->asic_type == CHIP_BONAIRE)
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ else
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
+ pi->pcie_gen_powersaving.max,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
+ pi->pcie_gen_performance.max,
+ pi->pcie_lane_performance.max);
+
+ pi->dpm_table.pcie_speed_table.count = 6;
+
+ return 0;
+}
+
+static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct amdgpu_cac_leakage_table *std_voltage_table =
+ &adev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i;
+	int ret;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_table->count < 1)
+ return -EINVAL;
+
+ memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
+
+ ci_reset_single_dpm_table(adev,
+ &pi->dpm_table.sclk_table,
+ SMU7_MAX_LEVELS_GRAPHICS);
+ ci_reset_single_dpm_table(adev,
+ &pi->dpm_table.mclk_table,
+ SMU7_MAX_LEVELS_MEMORY);
+ ci_reset_single_dpm_table(adev,
+ &pi->dpm_table.vddc_table,
+ SMU7_MAX_LEVELS_VDDC);
+ ci_reset_single_dpm_table(adev,
+ &pi->dpm_table.vddci_table,
+ SMU7_MAX_LEVELS_VDDCI);
+ ci_reset_single_dpm_table(adev,
+ &pi->dpm_table.mvdd_table,
+ SMU7_MAX_LEVELS_MVDD);
+
+ pi->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ if ((i == 0) ||
+ (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
+ allowed_sclk_vddc_table->entries[i].clk)) {
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
+ allowed_sclk_vddc_table->entries[i].clk;
+			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+				(i == 0);
+ pi->dpm_table.sclk_table.count++;
+ }
+ }
+
+ pi->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ if ((i == 0) ||
+ (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
+ allowed_mclk_table->entries[i].clk)) {
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
+ allowed_mclk_table->entries[i].clk;
+			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+				(i == 0);
+ pi->dpm_table.mclk_table.count++;
+ }
+ }
+
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ pi->dpm_table.vddc_table.dpm_levels[i].value =
+ allowed_sclk_vddc_table->entries[i].v;
+ pi->dpm_table.vddc_table.dpm_levels[i].param1 =
+ std_voltage_table->entries[i].leakage;
+ pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
+
+ allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+	if (allowed_mclk_table->count) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.vddci_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+ }
+
+ allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
+	if (allowed_mclk_table->count) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.mvdd_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+ }
+
+ ci_setup_default_pcie_tables(adev);
+
+ return 0;
+}
+
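+/* find the dpm level whose value matches the vbios bootup value */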
+static int ci_find_boot_level(struct ci_single_dpm_table *table,
+ u32 value, u32 *boot_level)
+{
+ u32 i;
+ int ret = -EINVAL;
+
+	for (i = 0; i < table->count; i++) {
+ if (value == table->dpm_levels[i].value) {
+ *boot_level = i;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
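+/* build the SMC dpm table and upload it to SMC sram */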
+static int ci_init_smc_table(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+ struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+ int ret;
+
+ ret = ci_setup_default_dpm_tables(adev);
+ if (ret)
+ return ret;
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
+ ci_populate_smc_voltage_tables(adev, table);
+
+ ci_init_fps_limits(adev);
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (adev->mc.is_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (ulv->supported) {
+ ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
+ if (ret)
+ return ret;
+ WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ }
+
+ ret = ci_populate_all_graphic_levels(adev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_all_memory_levels(adev);
+ if (ret)
+ return ret;
+
+ ci_populate_smc_link_level(adev, table);
+
+ ret = ci_populate_smc_acpi_level(adev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vce_level(adev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_acp_level(adev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_samu_level(adev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_do_program_memory_timing_parameters(adev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_uvd_level(adev, table);
+ if (ret)
+ return ret;
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
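+	/* lookup failures are non-fatal; the boot levels were zeroed above */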
+ ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
+ pi->vbios_boot_state.sclk_bootup_value,
+ (u32 *)&pi->smc_state_table.GraphicsBootLevel);
+
+ ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
+ pi->vbios_boot_state.mclk_bootup_value,
+ (u32 *)&pi->smc_state_table.MemoryBootLevel);
+
+ table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
+ table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
+ table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
+
+ ci_populate_smc_initial_state(adev, amdgpu_boot_state);
+
+ ret = ci_populate_bapm_parameters_in_dpm_table(adev);
+ if (ret)
+ return ret;
+
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
+ table->PCIeGenInterval = 1;
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
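+	/* the SMC expects these fields in big-endian byte order */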
+ table->SystemFlags = cpu_to_be32(table->SystemFlags);
+ table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
+ table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
+ table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
+ table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
+ table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
+ table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
+ table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
+ table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
+ table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
+ table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
+ table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
+
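+	/* upload from SystemFlags onwards; the size excludes the three
+	 * PID controller blocks at the head of the table
+	 */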
+ ret = amdgpu_ci_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (u8 *)&table->SystemFlags,
+ sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
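+/* enable only the dpm levels whose value falls within [low_limit, high_limit] */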
+static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
+ struct ci_single_dpm_table *dpm_table,
+ u32 low_limit, u32 high_limit)
+{
+ u32 i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit) ||
+ (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+}
+
+static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
+ u32 speed_low, u32 lanes_low,
+ u32 speed_high, u32 lanes_high)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+ u32 i, j;
+
+ for (i = 0; i < pcie_table->count; i++) {
+ if ((pcie_table->dpm_levels[i].value < speed_low) ||
+ (pcie_table->dpm_levels[i].param1 < lanes_low) ||
+ (pcie_table->dpm_levels[i].value > speed_high) ||
+ (pcie_table->dpm_levels[i].param1 > lanes_high))
+ pcie_table->dpm_levels[i].enabled = false;
+ else
+ pcie_table->dpm_levels[i].enabled = true;
+ }
+
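+	/* disable duplicate gen/lane pairs among the remaining enabled levels */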
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_table->dpm_levels[i].enabled) {
+ for (j = i + 1; j < pcie_table->count; j++) {
+ if (pcie_table->dpm_levels[j].enabled) {
+ if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
+ (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
+ pcie_table->dpm_levels[j].enabled = false;
+ }
+ }
+ }
+ }
+}
+
+static int ci_trim_dpm_states(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ci_ps *state = ci_get_ps(amdgpu_state);
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 high_limit_count;
+
+ if (state->performance_level_count < 1)
+ return -EINVAL;
+
+ if (state->performance_level_count == 1)
+ high_limit_count = 0;
+ else
+ high_limit_count = 1;
+
+ ci_trim_single_dpm_states(adev,
+ &pi->dpm_table.sclk_table,
+ state->performance_levels[0].sclk,
+ state->performance_levels[high_limit_count].sclk);
+
+ ci_trim_single_dpm_states(adev,
+ &pi->dpm_table.mclk_table,
+ state->performance_levels[0].mclk,
+ state->performance_levels[high_limit_count].mclk);
+
+ ci_trim_pcie_dpm_states(adev,
+ state->performance_levels[0].pcie_gen,
+ state->performance_levels[0].pcie_lane,
+ state->performance_levels[high_limit_count].pcie_gen,
+ state->performance_levels[high_limit_count].pcie_lane);
+
+ return 0;
+}
+
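+/*
+ * Look up the vddc needed for the current display clock and ask the SMC
+ * to honour it as a minimum voltage request.
+ */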
+static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
+{
+ struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
+ struct amdgpu_clock_voltage_dependency_table *vddc_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 requested_voltage = 0;
+ u32 i;
+
+ if (disp_voltage_table == NULL)
+ return -EINVAL;
+ if (!disp_voltage_table->count)
+ return -EINVAL;
+
+ for (i = 0; i < disp_voltage_table->count; i++) {
+ if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
+ requested_voltage = disp_voltage_table->entries[i].v;
+ }
+
+ for (i = 0; i < vddc_table->count; i++) {
+ if (requested_voltage <= vddc_table->entries[i].v) {
+ requested_voltage = vddc_table->entries[i].v;
+ return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_VddC_Request,
+ requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
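+/*
+ * Apply the display minimum voltage request and hand the sclk/mclk dpm
+ * level enable masks to the SMC.
+ */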
+static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ PPSMC_Result result;
+
+ ci_apply_disp_minimum_voltage_request(adev);
+
+ if (!pi->sclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+#if 0
+ if (!pi->pcie_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_PCIeDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+#endif
+
+ return 0;
+}
+
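+/*
+ * Flag which parts of the SMU7 dpm table need re-upload for the new state:
+ * OD updates when the requested clocks are missing from the tables, a
+ * plain mclk update when the active crtc count changes.
+ */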
+static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *state = ci_get_ps(amdgpu_state);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ u32 i;
+
+ pi->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count) {
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ } else {
+		/* XXX check display min clock requirements; this placeholder
+		 * never triggers since it compares the constant to itself
+		 */
+		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ if (adev->pm.dpm.current_active_crtc_count !=
+ adev->pm.dpm.new_active_crtc_count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+}
+
+static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *state = ci_get_ps(amdgpu_state);
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ int ret;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
+ dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+ dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+ ret = ci_populate_all_graphic_levels(adev);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+ ret = ci_populate_all_memory_levels(adev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
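+/*
+ * Enable the UVD dpm levels that fit under the current vddc limit and,
+ * while UVD is active, keep memory dpm level 0 disabled.
+ */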
+static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+ for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_uvd_dpm)
+ break;
+ }
+ }
+
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = true;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ } else {
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = false;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ }
+
+ return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+ for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_vce_dpm)
+ break;
+ }
+ }
+
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask);
+ }
+
+ return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+#if 0
+static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
+ for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_samu_dpm)
+ break;
+ }
+ }
+
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask);
+ }
+ return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
+ for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_acp_dpm)
+ break;
+ }
+ }
+
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask);
+ }
+
+ return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+#endif
+
+static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+
+ if (!gate) {
+ if (pi->caps_uvd_dpm ||
+ (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
+ pi->smc_state_table.UvdBootLevel = 0;
+ else
+ pi->smc_state_table.UvdBootLevel =
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
+
+ tmp = RREG32_SMC(ixDPM_TABLE_475);
+ tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
+ tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
+ WREG32_SMC(ixDPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_uvd_dpm(adev, !gate);
+}
+
+static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
+{
+ u8 i;
+ u32 min_evclk = 30000; /* ??? */
+ struct amdgpu_vce_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= min_evclk)
+ return i;
+ }
+
+ return table->count - 1;
+}
+
+static int ci_update_vce_dpm(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret = 0;
+ u32 tmp;
+
+ if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
+ if (amdgpu_new_state->evclk) {
+ /* turn the clocks on when encoding */
+ ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE,
+ AMDGPU_CG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
+ pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
+ tmp = RREG32_SMC(ixDPM_TABLE_475);
+ tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
+ tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
+ WREG32_SMC(ixDPM_TABLE_475, tmp);
+
+ ret = ci_enable_vce_dpm(adev, true);
+ } else {
+ /* turn the clocks off when not encoding */
+ ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE,
+ AMDGPU_CG_STATE_GATE);
+ if (ret)
+ return ret;
+
+ ret = ci_enable_vce_dpm(adev, false);
+ }
+ }
+ return ret;
+}
+
+#if 0
+static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
+{
+ return ci_enable_samu_dpm(adev, gate);
+}
+
+static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp;
+
+ if (!gate) {
+ pi->smc_state_table.AcpBootLevel = 0;
+
+ tmp = RREG32_SMC(ixDPM_TABLE_475);
+		/* assumed to mirror the UvdBootLevel/VceBootLevel handling above;
+		 * the DPM_TABLE_475 Acp field names follow that convention
+		 */
+		tmp &= ~DPM_TABLE_475__AcpBootLevel_MASK;
+		tmp |= (pi->smc_state_table.AcpBootLevel << DPM_TABLE_475__AcpBootLevel__SHIFT);
+ WREG32_SMC(ixDPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_acp_dpm(adev, !gate);
+}
+#endif
+
+static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret;
+
+ ret = ci_trim_dpm_states(adev, amdgpu_state);
+ if (ret)
+ return ret;
+
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
+ pi->last_mclk_dpm_enable_mask =
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ if (pi->uvd_enabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ }
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
+static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
+ u32 level_mask)
+{
+ u32 level = 0;
+
+ while ((level_mask & (1 << level)) == 0)
+ level++;
+
+ return level;
+}
+
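+/*
+ * Force sclk, mclk and pcie to their highest or lowest enabled level,
+ * or hand control back to the SMC for the auto level.
+ */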
+static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amdgpu_dpm_forced_level level)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 tmp, levels, i;
+ int ret;
+
+ if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+				ret = ci_dpm_force_state_pcie(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_sclk(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_mclk(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+				ret = ci_dpm_force_state_pcie(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(adev,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_sclk(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(adev,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_mclk(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(adev,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ ret = ci_dpm_force_state_pcie(adev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ if (!pi->pcie_dpm_key_disabled) {
+ PPSMC_Result smc_result;
+
+ smc_result = amdgpu_ci_send_msg_to_smc(adev,
+ PPSMC_MSG_PCIeDPM_UnForceLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ ret = ci_upload_dpm_level_enable_mask(adev);
+ if (ret)
+ return ret;
+ }
+
+ adev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
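+/*
+ * Append the MC_PMG_CMD_* shadow registers derived from the MC_SEQ_MISC1
+ * and MC_SEQ_RESERVE_M data to the end of the MC register table.
+ */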
+static int ci_set_mc_special_registers(struct amdgpu_device *adev,
+ struct ci_mc_reg_table *table)
+{
+ u8 i, j, k;
+ u32 temp_reg;
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+		switch (table->mc_reg_address[i].s1) {
+ case mmMC_SEQ_MISC1:
+ temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ temp_reg = RREG32(mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ if (!adev->mc.is_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (!adev->mc.is_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+ bool result = true;
+
+	switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static void ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+ u8 i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->valid_flag |= 1 << i;
+ break;
+ }
+ }
+ }
+}
+
+static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+ u32 i;
+ u16 address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+ address : table->mc_reg_address[i].s1;
+ }
+}
+
+static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
+ struct ci_mc_reg_table *ci_table)
+{
+ u8 i, j;
+
+ if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+ return -EINVAL;
+
+ for (i = 0; i < table->last; i++)
+ ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ci_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ci_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++)
+ ci_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ ci_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
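+/*
+ * Patch the MC sequence values for the 125000/137500 mclk_max entries on
+ * certain boards (device ids 0x67B0 and 0x67B1).
+ */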
+static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
+ struct ci_mc_reg_table *table)
+{
+ u8 i, k;
+ u32 tmp;
+ bool patch;
+
+ tmp = RREG32(mmMC_SEQ_MISC0);
+	patch = ((tmp & 0x0000f00) == 0x300);
+
+ if (patch &&
+ ((adev->pdev->device == 0x67B0) ||
+ (adev->pdev->device == 0x67B1))) {
+ for (i = 0; i < table->last; i++) {
+ if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ switch (table->mc_reg_address[i].s1) {
+ case mmMC_SEQ_MISC1:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
+ 0x00000007;
+ }
+ break;
+ case mmMC_SEQ_WR_CTL_D0:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+ 0x0000D0DD;
+ }
+ break;
+ case mmMC_SEQ_WR_CTL_D1:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+ 0x0000D0DD;
+ }
+ break;
+ case mmMC_SEQ_WR_CTL_2:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] = 0;
+ }
+ break;
+ case mmMC_SEQ_CAS_TIMING:
+ for (k = 0; k < table->num_entries; k++) {
+ if (table->mc_reg_table_entry[k].mclk_max == 125000)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+ 0x000C0140;
+ else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+ 0x000C0150;
+ }
+ break;
+ case mmMC_SEQ_MISC_TIMING:
+ for (k = 0; k < table->num_entries; k++) {
+ if (table->mc_reg_table_entry[k].mclk_max == 125000)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+ 0x00000030;
+ else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+ 0x00000035;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
+ tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
+ tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
+ WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
+ }
+
+ return 0;
+}
+
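+/*
+ * Copy the live MC sequence registers into their _LP shadows, then build
+ * the driver MC register table from the vbios and patch it up.
+ */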
+static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct atom_mc_reg_table *table;
+ struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
+ u8 module_index = ci_get_memory_module_index(adev);
+ int ret;
+
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
+ WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
+ WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
+ WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
+ WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
+ WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
+ WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
+ WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
+ WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
+ WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
+ WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
+ WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
+ WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
+ WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
+ WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
+ WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
+ WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
+ WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
+ WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
+ WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
+
+ ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_s0_mc_reg_index(ci_table);
+
+ ret = ci_register_patching_mc_seq(adev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = ci_set_mc_special_registers(adev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_valid_flag(ci_table);
+
+init_mc_done:
+ kfree(table);
+
+ return ret;
+}
+
+static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 i, j;
+
+ for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
+ if (pi->mc_reg_table.valid_flag & (1 << j)) {
+ if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (u8)i;
+
+ return 0;
+}
+
+static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
+ u32 num_entries, u32 valid_flag)
+{
+ u32 i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & (1 << j)) {
+ data->value[i] = cpu_to_be32(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+ const u32 memory_clock,
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+	u32 i;
+
+	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
+ if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+ break;
+ }
+
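+	/* no entry covers this clock; clamp to the last one */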
+ if ((i == pi->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, pi->mc_reg_table.last,
+ pi->mc_reg_table.valid_flag);
+}
+
+static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ u32 i;
+
+ for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
+ ci_convert_mc_reg_table_entry_to_smc(adev,
+ pi->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_reg_table->data[i]);
+}
+
+static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ int ret;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
+ if (ret)
+ return ret;
+ ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
+
+ return amdgpu_ci_copy_bytes_to_smc(adev,
+ pi->mc_reg_table_start,
+ (u8 *)&pi->smc_mc_reg_table,
+ sizeof(SMU7_Discrete_MCRegisters),
+ pi->sram_end);
+}
+
+static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
+
+ return amdgpu_ci_copy_bytes_to_smc(adev,
+ pi->mc_reg_table_start +
+ offsetof(SMU7_Discrete_MCRegisters, data[0]),
+ (u8 *)&pi->smc_mc_reg_table.data[0],
+ sizeof(SMU7_Discrete_MCRegisterSet) *
+ pi->dpm_table.mclk_table.count,
+ pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+ tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ci_ps *state = ci_get_ps(amdgpu_state);
+ int i;
+ u16 pcie_speed, max_speed = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ pcie_speed = state->performance_levels[i].pcie_gen;
+ if (max_speed < pcie_speed)
+ max_speed = pcie_speed;
+ }
+
+ return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+ u32 speed_cntl = 0;
+
+ speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+
+ return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
+{
+ u32 link_width = 0;
+
+ link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
+ link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+
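+	/* translate the encoded link width field into a lane count */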
+ switch (link_width) {
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 3:
+ return 4;
+ case 4:
+ return 8;
+ case 0:
+ case 6:
+ default:
+ return 16;
+ }
+}
+
+static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ enum amdgpu_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum amdgpu_pcie_gen current_link_speed;
+
+ if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+ current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
+ else
+ current_link_speed = pi->force_pcie_gen;
+
+ pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ pi->pspp_notify_required = false;
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+#ifdef CONFIG_ACPI
+ case AMDGPU_PCIE_GEN3:
+ if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+ break;
+ pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+ if (current_link_speed == AMDGPU_PCIE_GEN2)
+ break;
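+			/* fall through */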
+ case AMDGPU_PCIE_GEN2:
+ if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+ break;
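+			/* fall through */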
+#endif
+ default:
+ pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ pi->pspp_notify_required = true;
+ }
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ enum amdgpu_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(adev, amdgpu_new_state);
+ u8 request;
+
+ if (pi->pspp_notify_required) {
+ if (target_link_speed == AMDGPU_PCIE_GEN3)
+ request = PCIE_PERF_REQ_PECI_GEN3;
+ else if (target_link_speed == AMDGPU_PCIE_GEN2)
+ request = PCIE_PERF_REQ_PECI_GEN2;
+ else
+ request = PCIE_PERF_REQ_PECI_GEN1;
+
+ if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+ (ci_get_current_pcie_speed(adev) > 0))
+ return;
+
+#ifdef CONFIG_ACPI
+ amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+ }
+}
+
+static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table->count < 1)
+ return -EINVAL;
+
+ pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
+ pi->max_vddc_in_pp_table =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
+ pi->max_vddci_in_pp_table =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ return 0;
+}
+
+static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddc) {
+ *vddc = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddci) {
+ *vddci = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
+ struct amdgpu_vce_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
+ struct amdgpu_uvd_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
+ struct amdgpu_phase_shedding_limits_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
+ }
+}
+
+static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
+ struct amdgpu_clock_and_voltage_limits *table)
+{
+ if (table) {
+ ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
+ ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
+ }
+}
+
+static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
+ struct amdgpu_cac_leakage_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
+ }
+}
+
+static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
+{
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
+ ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+ ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
+ ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
+ ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
+ ci_patch_cac_leakage_table_with_vddc_leakage(adev,
+ &adev->pm.dpm.dyn_state.cac_leakage_table);
+}
+
+static void ci_update_current_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void ci_update_requested_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+ struct amdgpu_ps *new_ps = &requested_ps;
+
+ ci_update_requested_ps(adev, new_ps);
+
+ ci_apply_state_adjust_rules(adev, &pi->requested_rps);
+
+ return 0;
+}
+
+static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_ps *new_ps = &pi->requested_rps;
+
+ ci_update_current_ps(adev, new_ps);
+}
+
+static void ci_dpm_setup_asic(struct amdgpu_device *adev)
+{
+ ci_read_clock_registers(adev);
+ ci_enable_acpi_power_management(adev);
+ ci_init_sclk_t(adev);
+}
+
+static int ci_dpm_enable(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+ int ret;
+
+ if (amdgpu_ci_is_smc_running(adev))
+ return -EINVAL;
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ ci_enable_voltage_control(adev);
+ ret = ci_construct_voltage_tables(adev);
+ if (ret) {
+ DRM_ERROR("ci_construct_voltage_tables failed\n");
+ return ret;
+ }
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_initialize_mc_reg_table(adev);
+ if (ret)
+ pi->caps_dynamic_ac_timing = false;
+ }
+ if (pi->dynamic_ss)
+ ci_enable_spread_spectrum(adev, true);
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(adev, true);
+ ci_program_sstp(adev);
+ ci_enable_display_gap(adev);
+ ci_program_vc(adev);
+ ret = ci_upload_firmware(adev);
+ if (ret) {
+ DRM_ERROR("ci_upload_firmware failed\n");
+ return ret;
+ }
+ ret = ci_process_firmware_header(adev);
+ if (ret) {
+ DRM_ERROR("ci_process_firmware_header failed\n");
+ return ret;
+ }
+ ret = ci_initial_switch_from_arb_f0_to_f1(adev);
+ if (ret) {
+ DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+ return ret;
+ }
+ ret = ci_init_smc_table(adev);
+ if (ret) {
+ DRM_ERROR("ci_init_smc_table failed\n");
+ return ret;
+ }
+ ret = ci_init_arb_table_index(adev);
+ if (ret) {
+ DRM_ERROR("ci_init_arb_table_index failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_populate_initial_mc_reg_table(adev);
+ if (ret) {
+ DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_populate_pm_base(adev);
+ if (ret) {
+ DRM_ERROR("ci_populate_pm_base failed\n");
+ return ret;
+ }
+ ci_dpm_start_smc(adev);
+ ci_enable_vr_hot_gpio_interrupt(adev);
+ ret = ci_notify_smc_display_change(adev, false);
+ if (ret) {
+ DRM_ERROR("ci_notify_smc_display_change failed\n");
+ return ret;
+ }
+ ci_enable_sclk_control(adev, true);
+ ret = ci_enable_ulv(adev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ulv failed\n");
+ return ret;
+ }
+ ret = ci_enable_ds_master_switch(adev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ds_master_switch failed\n");
+ return ret;
+ }
+ ret = ci_start_dpm(adev);
+ if (ret) {
+ DRM_ERROR("ci_start_dpm failed\n");
+ return ret;
+ }
+ ret = ci_enable_didt(adev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_didt failed\n");
+ return ret;
+ }
+ ret = ci_enable_smc_cac(adev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = ci_enable_power_containment(adev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_power_containment failed\n");
+ return ret;
+ }
+
+ ret = ci_power_control_set_level(adev);
+ if (ret) {
+ DRM_ERROR("ci_power_control_set_level failed\n");
+ return ret;
+ }
+
+ ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ret = ci_enable_thermal_based_sclk_dpm(adev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
+ return ret;
+ }
+
+ ci_thermal_start_thermal_controller(adev);
+
+ ci_update_current_ps(adev, boot_ps);
+
+ if (adev->irq.installed &&
+ amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+#if 0
+ PPSMC_Result result;
+#endif
+ ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
+ CISLANDS_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("ci_thermal_set_temperature_range failed\n");
+ return ret;
+ }
+ amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+
+#if 0
+ result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+
+ if (result != PPSMC_Result_OK)
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+#endif
+ }
+
+ return 0;
+}
+
+static void ci_dpm_disable(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+
+ ci_dpm_powergate_uvd(adev, false);
+
+ if (!amdgpu_ci_is_smc_running(adev))
+ return;
+
+ ci_thermal_stop_thermal_controller(adev);
+
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(adev, false);
+ ci_enable_power_containment(adev, false);
+ ci_enable_smc_cac(adev, false);
+ ci_enable_didt(adev, false);
+ ci_enable_spread_spectrum(adev, false);
+ ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ ci_stop_dpm(adev);
+ ci_enable_ds_master_switch(adev, false);
+ ci_enable_ulv(adev, false);
+ ci_clear_vc(adev);
+ ci_reset_to_default(adev);
+ ci_dpm_stop_smc(adev);
+ ci_force_switch_to_arb_f0(adev);
+ ci_enable_thermal_based_sclk_dpm(adev, false);
+
+ ci_update_current_ps(adev, boot_ps);
+}
+
+static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_ps *new_ps = &pi->requested_rps;
+ struct amdgpu_ps *old_ps = &pi->current_rps;
+ int ret;
+
+ ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
+ if (pi->pcie_performance_request)
+ ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+ ret = ci_freeze_sclk_mclk_dpm(adev);
+ if (ret) {
+ DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
+ return ret;
+ }
+ ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+
+ ret = ci_update_vce_dpm(adev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("ci_update_vce_dpm failed\n");
+ return ret;
+ }
+
+ ret = ci_update_sclk_t(adev);
+ if (ret) {
+ DRM_ERROR("ci_update_sclk_t failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_update_and_upload_mc_reg_table(adev);
+ if (ret) {
+ DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_program_memory_timing_parameters(adev);
+ if (ret) {
+ DRM_ERROR("ci_program_memory_timing_parameters failed\n");
+ return ret;
+ }
+ ret = ci_unfreeze_sclk_mclk_dpm(adev);
+ if (ret) {
+ DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_upload_dpm_level_enable_mask(adev);
+ if (ret) {
+ DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+ if (pi->pcie_performance_request)
+ ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+
+ return 0;
+}
+
+#if 0
+static void ci_dpm_reset_asic(struct amdgpu_device *adev)
+{
+ ci_set_boot_state(adev);
+}
+#endif
+
+static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+ ci_program_display_gap(adev);
+}
+
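+/*
+ * The PowerPlay tables in the video BIOS come in several ATOM revisions;
+ * these unions overlay the possible layouts so the parsing code below
+ * can select the right view at run time from the table headers.
+ */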
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+ struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ adev->pm.dpm.boot_ps = rps;
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ adev->pm.dpm.uvd_ps = rps;
+}
+
+static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl = &ps->performance_levels[index];
+
+ ps->performance_level_count = index + 1;
+
+ pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+ pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
+ pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+ pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+
+ pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+ pi->sys_pcie_mask,
+ pi->vbios_boot_state.pcie_gen_bootup_value,
+ clock_info->ci.ucPCIEGen);
+ pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
+ pi->vbios_boot_state.pcie_lane_bootup_value,
+ le16_to_cpu(clock_info->ci.usPCIELane));
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+ pi->acpi_pcie_gen = pl->pcie_gen;
+ }
+
+ if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+ pi->ulv.supported = true;
+ pi->ulv.pl = *pl;
+ pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
+ }
+
+ /* patch up boot state */
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
+ pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
+ pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
+ pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
+ }
+
+ switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ pi->use_pcie_powersaving_levels = true;
+ if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
+ pi->pcie_gen_powersaving.max = pl->pcie_gen;
+ if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
+ pi->pcie_gen_powersaving.min = pl->pcie_gen;
+ if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
+ pi->pcie_lane_powersaving.max = pl->pcie_lane;
+ if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
+ pi->pcie_lane_powersaving.min = pl->pcie_lane;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ pi->use_pcie_performance_levels = true;
+ if (pi->pcie_gen_performance.max < pl->pcie_gen)
+ pi->pcie_gen_performance.max = pl->pcie_gen;
+ if (pi->pcie_gen_performance.min > pl->pcie_gen)
+ pi->pcie_gen_performance.min = pl->pcie_gen;
+ if (pi->pcie_lane_performance.max < pl->pcie_lane)
+ pi->pcie_lane_performance.max = pl->pcie_lane;
+ if (pi->pcie_lane_performance.min > pl->pcie_lane)
+ pi->pcie_lane_performance.min = pl->pcie_lane;
+ break;
+ default:
+ break;
+ }
+}
+
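+/*
+ * Walk the PPLib state array: each power state carries an index into the
+ * non-clock info array plus one clock-info index per DPM level.  Entries
+ * with out-of-range clock indices are skipped, and anything beyond
+ * CISLANDS_MAX_HARDWARE_POWERLEVELS is ignored rather than treated as an
+ * error.
+ */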
+static int ci_parse_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct ci_ps *ps;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ amdgpu_add_thermal_controller(adev);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!adev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(adev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.ps[i].ps_priv = ps;
+ ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ ci_parse_pplib_clock_info(adev,
+ &adev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ u32 sclk, mclk;
+ clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+ sclk |= clock_info->ci.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+ mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+ adev->pm.dpm.vce_states[i].sclk = sclk;
+ adev->pm.dpm.vce_states[i].mclk = mclk;
+ }
+
+ return 0;
+}
+
+static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
+ struct ci_vbios_boot_state *boot_state)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+ data_offset);
+ boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+ boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+ boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+ boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
+ boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
+ boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+ boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ci_dpm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+ kfree(adev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(adev->pm.dpm.ps);
+ kfree(adev->pm.dpm.priv);
+ kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+ amdgpu_free_extended_power_table(adev);
+}
+
+/**
+ * ci_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int ci_dpm_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ chip_name = "bonaire";
+ break;
+ case CHIP_HAWAII:
+ chip_name = "hawaii";
+ break;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+	default:
+		BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+ if (err) {
+ printk(KERN_ERR
+ "cik_smc: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int ci_dpm_init(struct amdgpu_device *adev)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ SMU7_Discrete_DpmTable *dpm_table;
+ struct amdgpu_gpio_rec gpio;
+ u16 data_offset, size;
+ u8 frev, crev;
+ struct ci_power_info *pi;
+ int ret;
+ u32 mask;
+
+ ret = ci_dpm_init_microcode(adev);
+ if (ret)
+ return ret;
+
+ pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ adev->pm.dpm.priv = pi;
+
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (ret)
+ pi->sys_pcie_mask = 0;
+ else
+ pi->sys_pcie_mask = mask;
+ pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+
+ pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
+ pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
+ pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
+ pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
+
+ pi->pcie_lane_performance.max = 0;
+ pi->pcie_lane_performance.min = 16;
+ pi->pcie_lane_powersaving.max = 0;
+ pi->pcie_lane_powersaving.min = 16;
+
+ ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
+ if (ret) {
+ ci_dpm_fini(adev);
+ return ret;
+ }
+
+ ret = amdgpu_get_platform_caps(adev);
+ if (ret) {
+ ci_dpm_fini(adev);
+ return ret;
+ }
+
+ ret = amdgpu_parse_extended_power_table(adev);
+ if (ret) {
+ ci_dpm_fini(adev);
+ return ret;
+ }
+
+ ret = ci_parse_power_table(adev);
+ if (ret) {
+ ci_dpm_fini(adev);
+ return ret;
+ }
+
+ pi->dll_default_on = false;
+ pi->sram_end = SMC_RAM_END;
+
+ pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
+
+ pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
+
+ pi->sclk_dpm_key_disabled = 0;
+ pi->mclk_dpm_key_disabled = 0;
+ pi->pcie_dpm_key_disabled = 0;
+ pi->thermal_sclk_dpm_enabled = 0;
+
+ pi->caps_sclk_ds = true;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+ pi->mclk_stutter_mode_threshold = 40000;
+ pi->mclk_edc_enable_threshold = 40000;
+ pi->mclk_edc_wr_enable_threshold = 40000;
+
+ ci_initialize_powertune_defaults(adev);
+
+ pi->caps_fps = false;
+
+ pi->caps_sclk_throttle_low_notification = false;
+
+ pi->caps_uvd_dpm = true;
+ pi->caps_vce_dpm = true;
+
+ ci_get_leakage_voltages(adev);
+ ci_patch_dependency_tables_with_leakage(adev);
+ ci_set_private_data_variables_based_on_pptable(adev);
+
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+ kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+ ci_dpm_fini(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+ adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+ adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+ adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+
+ adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+ adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+ adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+ adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+ if (adev->asic_type == CHIP_HAWAII) {
+ pi->thermal_temp_setting.temperature_low = 94500;
+ pi->thermal_temp_setting.temperature_high = 95000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+ } else {
+ pi->thermal_temp_setting.temperature_low = 99500;
+ pi->thermal_temp_setting.temperature_high = 100000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+ }
+
+ pi->uvd_enabled = false;
+
+ dpm_table = &pi->smc_state_table;
+
+ gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
+ if (gpio.valid) {
+ dpm_table->VRHotGpio = gpio.shift;
+ adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+ } else {
+ dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
+ adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+ }
+
+ gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
+ if (gpio.valid) {
+ dpm_table->AcDcGpio = gpio.shift;
+ adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+ } else {
+ dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
+ adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+ }
+
+ gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
+ if (gpio.valid) {
+ u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
+
+ switch (gpio.shift) {
+ case 0:
+ tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
+ tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
+ break;
+ case 1:
+ tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
+ tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
+ break;
+ case 2:
+ tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
+ break;
+ case 3:
+ tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
+ break;
+ case 4:
+ tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
+ break;
+ default:
+ DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ break;
+ }
+ WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
+ }
+
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
+ if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
+ }
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
+ if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
+ }
+
+ pi->vddc_phase_shed_control = true;
+
+#if defined(CONFIG_ACPI)
+ pi->pcie_performance_request =
+ amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+ pi->pcie_performance_request = false;
+#endif
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ pi->caps_sclk_ss_support = true;
+ pi->caps_mclk_ss_support = true;
+ pi->dynamic_ss = true;
+ } else {
+ pi->caps_sclk_ss_support = false;
+ pi->caps_mclk_ss_support = false;
+ pi->dynamic_ss = true;
+ }
+
+ if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+ pi->thermal_protection = true;
+ else
+ pi->thermal_protection = false;
+
+ pi->caps_dynamic_ac_timing = true;
+
+ pi->uvd_power_gated = false;
+
+ /* make sure dc limits are valid */
+ if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ pi->fan_ctrl_is_in_default_mode = true;
+
+ return 0;
+}
+
+static void
+ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct amdgpu_ps *rps = &pi->current_rps;
+ u32 sclk = ci_get_average_sclk_freq(adev);
+ u32 mclk = ci_get_average_mclk_freq(adev);
+
+ seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
+ seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
+ seq_printf(m, "power level avg sclk: %u mclk: %u\n",
+ sclk, mclk);
+}
+
+static void ci_dpm_print_power_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl;
+ int i;
+
+ amdgpu_dpm_print_class_info(rps->class, rps->class2);
+ amdgpu_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
+ i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
+ }
+ amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+/*
+ * Get the current GPU temperature in millidegrees Celsius.  CTF_TEMP is
+ * a 9-bit field; a reading with the top bit set is clamped to 255C
+ * before being scaled up.
+ */
+static int ci_dpm_get_temp(struct amdgpu_device *adev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
+
+static int ci_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = ci_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
+ CISLANDS_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+	return ci_thermal_enable_alert(adev, true);
+}
+
+static int ci_dpm_early_init(struct amdgpu_device *adev)
+{
+ ci_dpm_set_dpm_funcs(adev);
+ ci_dpm_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int ci_dpm_late_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ ret = ci_set_temperature_range(adev);
+ if (ret)
+ return ret;
+
+ ci_dpm_powergate_uvd(adev, true);
+
+ return 0;
+}
+
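+/*
+ * Interrupt source ids 230 and 231 are the CIK thermal threshold
+ * interrupts (low-to-high and high-to-low crossings respectively); both
+ * feed the shared thermal irq source serviced by the dpm thermal work
+ * handler.
+ */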
+static int ci_dpm_sw_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ /* default to balanced state */
+ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.default_sclk = adev->clock.default_sclk;
+ adev->pm.default_mclk = adev->clock.default_mclk;
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+ adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+ if (amdgpu_dpm == 0)
+ return 0;
+
+ INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+ mutex_lock(&adev->pm.mutex);
+ ret = ci_dpm_init(adev);
+ if (ret)
+ goto dpm_failed;
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ if (amdgpu_dpm == 1)
+ amdgpu_pm_print_power_states(adev);
+ ret = amdgpu_pm_sysfs_init(adev);
+ if (ret)
+ goto dpm_failed;
+ mutex_unlock(&adev->pm.mutex);
+ DRM_INFO("amdgpu: dpm initialized\n");
+
+ return 0;
+
+dpm_failed:
+ ci_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+ DRM_ERROR("amdgpu: dpm initialization failed\n");
+ return ret;
+}
+
+static int ci_dpm_sw_fini(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_pm_sysfs_fini(adev);
+ ci_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+
+ return 0;
+}
+
+static int ci_dpm_hw_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ci_dpm_setup_asic(adev);
+ ret = ci_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+static int ci_dpm_hw_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ ci_dpm_disable(adev);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return 0;
+}
+
+static int ci_dpm_suspend(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ /* disable dpm */
+ ci_dpm_disable(adev);
+ /* reset the power state */
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
+ }
+ return 0;
+}
+
+static int ci_dpm_resume(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (adev->pm.dpm_enabled) {
+ /* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
+ ci_dpm_setup_asic(adev);
+ ret = ci_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
+ amdgpu_pm_compute_clocks(adev);
+ }
+ return 0;
+}
+
+static bool ci_dpm_is_idle(struct amdgpu_device *adev)
+{
+ /* XXX */
+ return true;
+}
+
+static int ci_dpm_wait_for_idle(struct amdgpu_device *adev)
+{
+ /* XXX */
+ return 0;
+}
+
+static void ci_dpm_print_status(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, "CIK DPM registers\n");
+ dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n",
+ RREG32(mmBIOS_SCRATCH_4));
+ dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n",
+ RREG32(mmMC_ARB_DRAM_TIMING));
+ dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n",
+ RREG32(mmMC_ARB_DRAM_TIMING2));
+ dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n",
+ RREG32(mmMC_ARB_BURST_TIME));
+ dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n",
+ RREG32(mmMC_ARB_DRAM_TIMING_1));
+ dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n",
+ RREG32(mmMC_ARB_DRAM_TIMING2_1));
+ dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n",
+ RREG32(mmMC_CG_CONFIG));
+ dev_info(adev->dev, " MC_ARB_CG=0x%08X\n",
+ RREG32(mmMC_ARB_CG));
+ dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_SQ_CTRL0));
+ dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_DB_CTRL0));
+ dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_TD_CTRL0));
+ dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_TCP_CTRL0));
+ dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n",
+ RREG32_SMC(ixCG_THERMAL_INT));
+ dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n",
+ RREG32_SMC(ixCG_THERMAL_CTRL));
+ dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
+ RREG32_SMC(ixGENERAL_PWRMGT));
+ dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n",
+ RREG32(mmMC_SEQ_CNTL_3));
+ dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC0_CNTL));
+ dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC1_CNTL));
+ dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n",
+ RREG32_SMC(ixLCAC_CPL_CNTL));
+ dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
+ RREG32_SMC(ixSCLK_PWRMGT_CNTL));
+ dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n",
+ RREG32(mmBIF_LNCNT_RESET));
+ dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n",
+ RREG32_SMC(ixFIRMWARE_FLAGS));
+ dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n",
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
+ dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n",
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
+ dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n",
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
+ dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n",
+ RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
+ dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
+ RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
+ dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
+ RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
+ dev_info(adev->dev, " DLL_CNTL=0x%08X\n",
+ RREG32(mmDLL_CNTL));
+ dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n",
+ RREG32(mmMCLK_PWRMGT_CNTL));
+ dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n",
+ RREG32(mmMPLL_AD_FUNC_CNTL));
+ dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n",
+ RREG32(mmMPLL_DQ_FUNC_CNTL));
+ dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n",
+ RREG32(mmMPLL_FUNC_CNTL));
+ dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n",
+ RREG32(mmMPLL_FUNC_CNTL_1));
+ dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n",
+ RREG32(mmMPLL_FUNC_CNTL_2));
+ dev_info(adev->dev, " MPLL_SS1=0x%08X\n",
+ RREG32(mmMPLL_SS1));
+ dev_info(adev->dev, " MPLL_SS2=0x%08X\n",
+ RREG32(mmMPLL_SS2));
+ dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n",
+ RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
+ dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n",
+ RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
+ dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
+ RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
+ dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n",
+ RREG32_SMC(ixRCU_UC_EVENTS));
+ dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n",
+ RREG32_SMC(ixDPM_TABLE_475));
+ dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_RAS_TIMING_LP));
+ dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n",
+ RREG32(mmMC_SEQ_RAS_TIMING));
+ dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_CAS_TIMING_LP));
+ dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n",
+ RREG32(mmMC_SEQ_CAS_TIMING));
+ dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_DLL_STBY_LP));
+ dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n",
+ RREG32(mmMC_SEQ_DLL_STBY));
+ dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
+ dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n",
+ RREG32(mmMC_SEQ_G5PDX_CMD0));
+ dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
+ dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n",
+ RREG32(mmMC_SEQ_G5PDX_CMD1));
+ dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
+ dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n",
+ RREG32(mmMC_SEQ_G5PDX_CTRL));
+ dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
+ dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_DVS_CMD));
+ dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
+ dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_DVS_CTL));
+ dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_MISC_TIMING_LP));
+ dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n",
+ RREG32(mmMC_SEQ_MISC_TIMING));
+ dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_MISC_TIMING2_LP));
+ dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n",
+ RREG32(mmMC_SEQ_MISC_TIMING2));
+ dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
+ dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n",
+ RREG32(mmMC_PMG_CMD_EMRS));
+ dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
+ dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n",
+ RREG32(mmMC_PMG_CMD_MRS));
+ dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
+ dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n",
+ RREG32(mmMC_PMG_CMD_MRS1));
+ dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_WR_CTL_D0_LP));
+ dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n",
+ RREG32(mmMC_SEQ_WR_CTL_D0));
+ dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_WR_CTL_D1_LP));
+ dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n",
+ RREG32(mmMC_SEQ_WR_CTL_D1));
+ dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_RD_CTL_D0_LP));
+ dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n",
+ RREG32(mmMC_SEQ_RD_CTL_D0));
+ dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_RD_CTL_D1_LP));
+ dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n",
+ RREG32(mmMC_SEQ_RD_CTL_D1));
+ dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_TIMING_LP));
+ dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_TIMING));
+ dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
+ dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n",
+ RREG32(mmMC_PMG_CMD_MRS2));
+ dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n",
+ RREG32(mmMC_SEQ_WR_CTL_2_LP));
+ dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n",
+ RREG32(mmMC_SEQ_WR_CTL_2));
+ dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n",
+ RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
+ dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
+ RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
+ dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
+ RREG32(mmSMC_IND_INDEX_0));
+ dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
+ RREG32(mmSMC_IND_DATA_0));
+ dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
+ RREG32(mmSMC_IND_ACCESS_CNTL));
+ dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
+ RREG32(mmSMC_RESP_0));
+ dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
+ RREG32(mmSMC_MESSAGE_0));
+ dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n",
+ RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
+ dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
+ RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
+ dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n",
+ RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
+ dev_info(adev->dev, " SMC_PC_C=0x%08X\n",
+ RREG32_SMC(ixSMC_PC_C));
+}
+
+static int ci_dpm_soft_reset(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
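+/*
+ * THERM_INTH/THERM_INTL in CG_THERMAL_INT are interrupt *mask* bits:
+ * setting a bit masks (disables) the corresponding threshold interrupt,
+ * clearing it allows delivery again.
+ */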
+static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cg_thermal_int;
+
+ switch (type) {
+ case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ bool queue_thermal = false;
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->src_id) {
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ adev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ adev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ default:
+ break;
+ }
+
+ if (queue_thermal)
+ schedule_work(&adev->pm.dpm.thermal.work);
+
+ return 0;
+}
+
+static int ci_dpm_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ return 0;
+}
+
+static int ci_dpm_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs ci_dpm_ip_funcs = {
+ .early_init = ci_dpm_early_init,
+ .late_init = ci_dpm_late_init,
+ .sw_init = ci_dpm_sw_init,
+ .sw_fini = ci_dpm_sw_fini,
+ .hw_init = ci_dpm_hw_init,
+ .hw_fini = ci_dpm_hw_fini,
+ .suspend = ci_dpm_suspend,
+ .resume = ci_dpm_resume,
+ .is_idle = ci_dpm_is_idle,
+ .wait_for_idle = ci_dpm_wait_for_idle,
+ .soft_reset = ci_dpm_soft_reset,
+ .print_status = ci_dpm_print_status,
+ .set_clockgating_state = ci_dpm_set_clockgating_state,
+ .set_powergating_state = ci_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
+ .get_temperature = &ci_dpm_get_temp,
+ .pre_set_power_state = &ci_dpm_pre_set_power_state,
+ .set_power_state = &ci_dpm_set_power_state,
+ .post_set_power_state = &ci_dpm_post_set_power_state,
+ .display_configuration_changed = &ci_dpm_display_configuration_changed,
+ .get_sclk = &ci_dpm_get_sclk,
+ .get_mclk = &ci_dpm_get_mclk,
+ .print_power_state = &ci_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &ci_dpm_force_performance_level,
+ .vblank_too_short = &ci_dpm_vblank_too_short,
+ .powergate_uvd = &ci_dpm_powergate_uvd,
+ .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
+ .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
+ .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
+ .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+};
+
+static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+ if (adev->pm.funcs == NULL)
+ adev->pm.funcs = &ci_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
+ .set = ci_dpm_set_interrupt_state,
+ .process = ci_dpm_process_interrupt,
+};
+
+static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+ adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
new file mode 100644
index 000000000000..faccc30c93bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_DPM_H__
+#define __CI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "ppsmc.h"
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#include "smu7_discrete.h"
+
+#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+
+#define CISLANDS_UNUSED_GPIO_PIN 0x7F
+
+struct ci_pl {
+ u32 mclk;
+ u32 sclk;
+ enum amdgpu_pcie_gen pcie_gen;
+ u16 pcie_lane;
+};
+
+struct ci_ps {
+ u16 performance_level_count;
+ bool dc_compatible;
+ u32 sclk_t;
+ struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct ci_dpm_level {
+ bool enabled;
+ u32 value;
+ u32 param1;
+};
+
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+
+struct ci_single_dpm_table {
+ u32 count;
+ struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct ci_dpm_table {
+ struct ci_single_dpm_table sclk_table;
+ struct ci_single_dpm_table mclk_table;
+ struct ci_single_dpm_table pcie_speed_table;
+ struct ci_single_dpm_table vddc_table;
+ struct ci_single_dpm_table vddci_table;
+ struct ci_single_dpm_table mvdd_table;
+};
+
+struct ci_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_ulv_parm {
+ bool supported;
+ u32 cg_ulv_parameter;
+ u32 volt_change_delay;
+ struct ci_pl pl;
+};
+
+#define CISLANDS_MAX_LEAKAGE_COUNT 8
+
+struct ci_leakage_voltage {
+ u16 count;
+ u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
+ u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct ci_dpm_level_enable_mask {
+ u32 uvd_dpm_enable_mask;
+ u32 vce_dpm_enable_mask;
+ u32 acp_dpm_enable_mask;
+ u32 samu_dpm_enable_mask;
+ u32 sclk_dpm_enable_mask;
+ u32 mclk_dpm_enable_mask;
+ u32 pcie_dpm_enable_mask;
+};
+
+struct ci_vbios_boot_state {
+ u16 mvdd_bootup_value;
+ u16 vddc_bootup_value;
+ u16 vddci_bootup_value;
+ u32 sclk_bootup_value;
+ u32 mclk_bootup_value;
+ u16 pcie_gen_bootup_value;
+ u16 pcie_lane_bootup_value;
+};
+
+struct ci_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 dll_cntl;
+ u32 mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl_1;
+ u32 mpll_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct ci_thermal_temperature_setting {
+ s32 temperature_low;
+ s32 temperature_high;
+ s32 temperature_shutdown;
+};
+
+struct ci_pcie_perf_range {
+ u16 max;
+ u16 min;
+};
+
+enum ci_pt_config_reg_type {
+ CISLANDS_CONFIGREG_MMR = 0,
+ CISLANDS_CONFIGREG_SMC_IND,
+ CISLANDS_CONFIGREG_DIDT_IND,
+ CISLANDS_CONFIGREG_CACHE,
+ CISLANDS_CONFIGREG_MAX
+};
+
+#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+
+struct ci_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum ci_pt_config_reg_type type;
+};
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+
+struct ci_power_info {
+ struct ci_dpm_table dpm_table;
+ u32 voltage_control;
+ u32 mvdd_control;
+ u32 vddci_control;
+ u32 active_auto_throttle_sources;
+ struct ci_clock_registers clock_registers;
+ u16 acpi_vddc;
+ u16 acpi_vddci;
+ enum amdgpu_pcie_gen force_pcie_gen;
+ enum amdgpu_pcie_gen acpi_pcie_gen;
+ struct ci_leakage_voltage vddc_leakage;
+ struct ci_leakage_voltage vddci_leakage;
+ u16 max_vddc_in_pp_table;
+ u16 min_vddc_in_pp_table;
+ u16 max_vddci_in_pp_table;
+ u16 min_vddci_in_pp_table;
+ u32 mclk_strobe_mode_threshold;
+ u32 mclk_stutter_mode_threshold;
+ u32 mclk_edc_enable_threshold;
+ u32 mclk_edc_wr_enable_threshold;
+ struct ci_vbios_boot_state vbios_boot_state;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ u32 mc_reg_table_start;
+ u32 fan_table_start;
+ u32 arb_table_start;
+ /* smc tables */
+ SMU7_Discrete_DpmTable smc_state_table;
+ SMU7_Discrete_MCRegisters smc_mc_reg_table;
+ SMU7_Discrete_PmFuses smc_powertune_table;
+ /* other stuff */
+ struct ci_mc_reg_table mc_reg_table;
+ struct atom_voltage_table vddc_voltage_table;
+ struct atom_voltage_table vddci_voltage_table;
+ struct atom_voltage_table mvdd_voltage_table;
+ struct ci_ulv_parm ulv;
+ u32 power_containment_features;
+ const struct ci_pt_defaults *powertune_defaults;
+ u32 dte_tj_offset;
+ bool vddc_phase_shed_control;
+ struct ci_thermal_temperature_setting thermal_temp_setting;
+ struct ci_dpm_level_enable_mask dpm_level_enable_mask;
+ u32 need_update_smu7_dpm_table;
+ u32 sclk_dpm_key_disabled;
+ u32 mclk_dpm_key_disabled;
+ u32 pcie_dpm_key_disabled;
+ u32 thermal_sclk_dpm_enabled;
+ struct ci_pcie_perf_range pcie_gen_performance;
+ struct ci_pcie_perf_range pcie_lane_performance;
+ struct ci_pcie_perf_range pcie_gen_powersaving;
+ struct ci_pcie_perf_range pcie_lane_powersaving;
+ u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+ u32 mclk_activity_target;
+ u32 low_sclk_interrupt_t;
+ u32 last_mclk_dpm_enable_mask;
+ u32 sys_pcie_mask;
+ /* caps */
+ bool caps_power_containment;
+ bool caps_cac;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_fps;
+ bool caps_sclk_ds;
+ bool caps_sclk_ss_support;
+ bool caps_mclk_ss_support;
+ bool caps_uvd_dpm;
+ bool caps_vce_dpm;
+ bool caps_samu_dpm;
+ bool caps_acp_dpm;
+ bool caps_automatic_dc_transition;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_dynamic_ac_timing;
+ bool caps_od_fuzzy_fan_control_support;
+ /* flags */
+ bool thermal_protection;
+ bool pcie_performance_request;
+ bool dynamic_ss;
+ bool dll_default_on;
+ bool cac_enabled;
+ bool uvd_enabled;
+ bool battery_state;
+ bool pspp_notify_required;
+ bool enable_bapm_feature;
+ bool enable_tdc_limit_feature;
+ bool enable_pkg_pwr_tracking_feature;
+ bool use_pcie_performance_levels;
+ bool use_pcie_powersaving_levels;
+ bool uvd_power_gated;
+ /* driver states */
+ struct amdgpu_ps current_rps;
+ struct ci_ps current_ps;
+ struct amdgpu_ps requested_rps;
+ struct ci_ps requested_ps;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ bool fan_is_controlled_by_smc;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
+};
+
+#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
+#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
+
+#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
+
+#define CISLANDS_VRC_DFLT0 0x3FFFC000
+#define CISLANDS_VRC_DFLT1 0x000400
+#define CISLANDS_VRC_DFLT2 0xC00080
+#define CISLANDS_VRC_DFLT3 0xC00200
+#define CISLANDS_VRC_DFLT4 0xC01680
+#define CISLANDS_VRC_DFLT5 0xC00033
+#define CISLANDS_VRC_DFLT6 0xC00033
+#define CISLANDS_VRC_DFLT7 0x3FFFC000
+
+#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
+#define CISLAND_TARGETACTIVITY_DFLT 30
+#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
+#define PCIE_PERF_REQ_PECI_GEN1 2
+#define PCIE_PERF_REQ_PECI_GEN2 3
+#define PCIE_PERF_REQ_PECI_GEN3 4
+
+#define CISLANDS_SSTU_DFLT 0
+#define CISLANDS_SST_DFLT 0x00C8
+
+/* XXX are these ok? */
+#define CISLANDS_TEMP_RANGE_MIN (90 * 1000)
+#define CISLANDS_TEMP_RANGE_MAX (120 * 1000)
+
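+/*
+ * Low-level SMC helpers implemented in ci_smc.c.  The dword accessors
+ * are typically used against offsets within the uploaded DPM table,
+ * e.g. (illustrative sketch only):
+ *
+ *	u32 tmp;
+ *	int ret = amdgpu_ci_read_smc_sram_dword(adev,
+ *			pi->dpm_table_start +
+ *			offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ *			&tmp, pi->sram_end);
+ */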
+int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_ci_start_smc(struct amdgpu_device *adev);
+void amdgpu_ci_reset_smc(struct amdgpu_device *adev);
+int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev);
+void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev);
+bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
+ u32 smc_address, u32 *value, u32 limit);
+int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
+ u32 smc_address, u32 value, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
new file mode 100644
index 000000000000..7eb9069db8e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "cikd.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "ci_dpm.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
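+/*
+ * SMC SRAM is reached through the SMC_IND_INDEX_0/SMC_IND_DATA_0 register
+ * pair.  Accesses must be dword aligned and stay below the caller-supplied
+ * limit; auto-increment is switched off here, so every dword is addressed
+ * explicitly.
+ */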
+static int ci_set_smc_sram_address(struct amdgpu_device *adev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+
+ return 0;
+}
+
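+/*
+ * Copy a buffer into SMC SRAM.  The SMC address space is big endian, so
+ * each dword is packed MSB first; a trailing partial dword is merged with
+ * the existing SRAM contents via read-modify-write so the neighbouring
+ * bytes are preserved.
+ */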
+int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ unsigned long flags;
+ u32 data, original_data;
+ u32 addr;
+ u32 extra_shift;
+ int ret = 0;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ ret = ci_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = ci_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ original_data = RREG32(mmSMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = ci_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+ }
+
+done:
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
+
+void amdgpu_ci_start_smc(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+
+ tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_ci_reset_smc(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+
+ tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
+}
+
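+/*
+ * Plant a 4-byte boot vector at SMC address 0 so that the microcontroller
+ * jumps into the loaded firmware image when it is taken out of reset.
+ */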
+int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev)
+{
+ static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
+
+ WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
+
+ WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev)
+{
+ u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ u32 pc_c = RREG32_SMC(ixSMC_PC_C);
+
+ if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c))
+ return true;
+
+ return false;
+}
+
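+/*
+ * Post a message to the SMC mailbox and busy-wait (bounded by the usual
+ * usec timeout) for a response to show up in SMC_RESP_0; whatever value
+ * is there at the end is handed back to the caller.
+ */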
+PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!amdgpu_ci_is_smc_running(adev))
+ return PPSMC_Result_Failed;
+
+ WREG32(mmSMC_MESSAGE_0, msg);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(mmSMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(mmSMC_RESP_0);
+
+ return (PPSMC_Result)tmp;
+}
+
+PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ int i;
+
+ if (!amdgpu_ci_is_smc_running(adev))
+ return PPSMC_Result_OK;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
+ break;
+ udelay(1);
+ }
+
+ return PPSMC_Result_OK;
+}
+
+int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ unsigned long flags;
+ u32 ucode_start_address;
+ u32 ucode_size;
+ const u8 *src;
+ u32 data;
+
+ if (!adev->pm.fw)
+ return -EINVAL;
+
+ hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ if (ucode_size & 3)
+ return -EINVAL;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
+ ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+ while (ucode_size >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ WREG32(mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ ucode_size -= 4;
+ }
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return 0;
+}
+
+int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
+ u32 smc_address, u32 *value, u32 limit)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ ret = ci_set_smc_sram_address(adev, smc_address, limit);
+ if (ret == 0)
+ *value = RREG32(mmSMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
+
+int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
+ u32 smc_address, u32 value, u32 limit)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ ret = ci_set_smc_sram_address(adev, smc_address, limit);
+ if (ret == 0)
+ WREG32(mmSMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
new file mode 100644
index 000000000000..74ce0be2fbb7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -0,0 +1,2505 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "cikd.h"
+#include "atom.h"
+
+#include "cik.h"
+#include "gmc_v7_0.h"
+#include "cik_ih.h"
+#include "dce_v8_0.h"
+#include "gfx_v7_0.h"
+#include "cik_sdma.h"
+#include "uvd_v4_2.h"
+#include "vce_v2_0.h"
+#include "cik_dpm.h"
+
+#include "uvd/uvd_4_2_d.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+/*
+ * Indirect register accessors: the PCIE, SMC, UVD context and DIDT
+ * register spaces are reached through index/data register pairs,
+ * serialized by per-block spinlocks.
+ */
+static u32 cik_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(mmPCIE_INDEX, reg);
+ (void)RREG32(mmPCIE_INDEX);
+ r = RREG32(mmPCIE_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static void cik_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(mmPCIE_INDEX, reg);
+ (void)RREG32(mmPCIE_INDEX);
+ WREG32(mmPCIE_DATA, v);
+ (void)RREG32(mmPCIE_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 cik_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ r = RREG32(mmSMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return r;
+}
+
+static void cik_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ WREG32(mmSMC_IND_DATA_0, (v));
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+ r = RREG32(mmUVD_CTX_DATA);
+ spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+ return r;
+}
+
+static void cik_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+ WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+ WREG32(mmUVD_CTX_DATA, (v));
+ spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+}
+
+static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ WREG32(mmDIDT_IND_INDEX, (reg));
+ r = RREG32(mmDIDT_IND_DATA);
+ spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+ return r;
+}
+
+static void cik_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->didt_idx_lock, flags);
+ WREG32(mmDIDT_IND_INDEX, (reg));
+ WREG32(mmDIDT_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+}
+
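+/*
+ * Golden register tables: {offset, mask, value} triplets consumed by
+ * amdgpu_program_register_sequence(), which replaces the masked bits
+ * of each register with the supplied value.
+ */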
+static const u32 bonaire_golden_spm_registers[] =
+{
+ 0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 bonaire_golden_common_registers[] =
+{
+ 0x31dc, 0xffffffff, 0x00000800,
+ 0x31dd, 0xffffffff, 0x00000800,
+ 0x31e6, 0xffffffff, 0x00007fbf,
+ 0x31e7, 0xffffffff, 0x00007faf
+};
+
+static const u32 bonaire_golden_registers[] =
+{
+ 0xcd5, 0x00000333, 0x00000333,
+ 0xcd4, 0x000c0fc0, 0x00040200,
+ 0x2684, 0x00010000, 0x00058208,
+ 0xf000, 0xffff1fff, 0x00140000,
+ 0xf080, 0xfdfc0fff, 0x00000100,
+ 0xf08d, 0x40000000, 0x40000200,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x31e, 0x00000080, 0x00000000,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0xf0311fff, 0x80300000,
+ 0x263e, 0x73773777, 0x12010001,
+ 0xd43, 0x00810000, 0x408af000,
+ 0x1c0c, 0x31000111, 0x00000011,
+ 0xbd2, 0x73773777, 0x12010001,
+ 0x883, 0x00007fb6, 0x0021a1b1,
+ 0x884, 0x00007fb6, 0x002021b1,
+ 0x860, 0x00007fb6, 0x00002191,
+ 0x886, 0x00007fb6, 0x002121b1,
+ 0x887, 0x00007fb6, 0x002021b1,
+ 0x877, 0x00007fb6, 0x00002191,
+ 0x878, 0x00007fb6, 0x00002191,
+ 0xd8a, 0x0000003f, 0x0000000a,
+ 0xd8b, 0x0000003f, 0x0000000a,
+ 0xab9, 0x00073ffe, 0x000022a2,
+ 0x903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000003f, 0x00000007,
+ 0x22fc, 0x00002001, 0x00000001,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0xc281, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x06000000,
+ 0x136, 0x00000fff, 0x00000100,
+ 0xf9e, 0x00000001, 0x00000002,
+ 0x2440, 0x03000000, 0x0362c688,
+ 0x2300, 0x000000ff, 0x00000001,
+ 0x390, 0x00001fff, 0x00001fff,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f3,
+ 0x2b03, 0xffffffff, 0x00001032
+};
+
+static const u32 bonaire_mgcg_cgcg_init[] =
+{
+ 0x3108, 0xffffffff, 0xfffffffc,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf0a8, 0xffffffff, 0x00000100,
+ 0xf082, 0xffffffff, 0x00000100,
+ 0xf0b0, 0xffffffff, 0xc0000100,
+ 0xf0b2, 0xffffffff, 0xc0000100,
+ 0xf0b1, 0xffffffff, 0xc0000100,
+ 0x1579, 0xffffffff, 0x00600100,
+ 0xf0a0, 0xffffffff, 0x00000100,
+ 0xf085, 0xffffffff, 0x06000100,
+ 0xf088, 0xffffffff, 0x00000100,
+ 0xf086, 0xffffffff, 0x06000100,
+ 0xf081, 0xffffffff, 0x00000100,
+ 0xf0b8, 0xffffffff, 0x00000100,
+ 0xf089, 0xffffffff, 0x00000100,
+ 0xf080, 0xffffffff, 0x00000100,
+ 0xf08c, 0xffffffff, 0x00000100,
+ 0xf08d, 0xffffffff, 0x00000100,
+ 0xf094, 0xffffffff, 0x00000100,
+ 0xf095, 0xffffffff, 0x00000100,
+ 0xf096, 0xffffffff, 0x00000100,
+ 0xf097, 0xffffffff, 0x00000100,
+ 0xf098, 0xffffffff, 0x00000100,
+ 0xf09f, 0xffffffff, 0x00000100,
+ 0xf09e, 0xffffffff, 0x00000100,
+ 0xf084, 0xffffffff, 0x06000100,
+ 0xf0a4, 0xffffffff, 0x00000100,
+ 0xf09d, 0xffffffff, 0x00000100,
+ 0xf0ad, 0xffffffff, 0x00000100,
+ 0xf0ac, 0xffffffff, 0x00000100,
+ 0xf09c, 0xffffffff, 0x00000100,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf008, 0xffffffff, 0x00010000,
+ 0xf009, 0xffffffff, 0x00030002,
+ 0xf00a, 0xffffffff, 0x00040007,
+ 0xf00b, 0xffffffff, 0x00060005,
+ 0xf00c, 0xffffffff, 0x00090008,
+ 0xf00d, 0xffffffff, 0x00010000,
+ 0xf00e, 0xffffffff, 0x00030002,
+ 0xf00f, 0xffffffff, 0x00040007,
+ 0xf010, 0xffffffff, 0x00060005,
+ 0xf011, 0xffffffff, 0x00090008,
+ 0xf012, 0xffffffff, 0x00010000,
+ 0xf013, 0xffffffff, 0x00030002,
+ 0xf014, 0xffffffff, 0x00040007,
+ 0xf015, 0xffffffff, 0x00060005,
+ 0xf016, 0xffffffff, 0x00090008,
+ 0xf017, 0xffffffff, 0x00010000,
+ 0xf018, 0xffffffff, 0x00030002,
+ 0xf019, 0xffffffff, 0x00040007,
+ 0xf01a, 0xffffffff, 0x00060005,
+ 0xf01b, 0xffffffff, 0x00090008,
+ 0xf01c, 0xffffffff, 0x00010000,
+ 0xf01d, 0xffffffff, 0x00030002,
+ 0xf01e, 0xffffffff, 0x00040007,
+ 0xf01f, 0xffffffff, 0x00060005,
+ 0xf020, 0xffffffff, 0x00090008,
+ 0xf021, 0xffffffff, 0x00010000,
+ 0xf022, 0xffffffff, 0x00030002,
+ 0xf023, 0xffffffff, 0x00040007,
+ 0xf024, 0xffffffff, 0x00060005,
+ 0xf025, 0xffffffff, 0x00090008,
+ 0xf026, 0xffffffff, 0x00010000,
+ 0xf027, 0xffffffff, 0x00030002,
+ 0xf028, 0xffffffff, 0x00040007,
+ 0xf029, 0xffffffff, 0x00060005,
+ 0xf02a, 0xffffffff, 0x00090008,
+ 0xf000, 0xffffffff, 0x96e00200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x3109, 0xffffffff, 0x0020003f,
+ 0xe, 0xffffffff, 0x0140001c,
+ 0xf, 0x000f0000, 0x000f0000,
+ 0x88, 0xffffffff, 0xc060000c,
+ 0x89, 0xc0000fff, 0x00000100,
+ 0x3e4, 0xffffffff, 0x00000100,
+ 0x3e6, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3403, 0xff000ff0, 0x00000100,
+ 0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 spectre_golden_spm_registers[] =
+{
+ 0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 spectre_golden_common_registers[] =
+{
+ 0x31dc, 0xffffffff, 0x00000800,
+ 0x31dd, 0xffffffff, 0x00000800,
+ 0x31e6, 0xffffffff, 0x00007fbf,
+ 0x31e7, 0xffffffff, 0x00007faf
+};
+
+static const u32 spectre_golden_registers[] =
+{
+ 0xf000, 0xffff1fff, 0x96940200,
+ 0xf003, 0xffff0001, 0xff000000,
+ 0xf080, 0xfffc0fff, 0x00000100,
+ 0x1bb6, 0x00010101, 0x00010000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0xfffffffc, 0x00020200,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0xf0311fff, 0x80300000,
+ 0x263e, 0x73773777, 0x12010001,
+ 0x26df, 0x00ff0000, 0x00fc0000,
+ 0xbd2, 0x73773777, 0x12010001,
+ 0x2285, 0xf000003f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0xa0d4, 0x3f3f3fff, 0x00000082,
+ 0xa0d5, 0x0000003f, 0x00000000,
+ 0xf9e, 0x00000001, 0x00000002,
+ 0x244f, 0xffff03df, 0x00000004,
+ 0x31da, 0x00000008, 0x00000008,
+ 0x2300, 0x000008ff, 0x00000800,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b03, 0xffffffff, 0x54763210,
+ 0x853e, 0x01ff01ff, 0x00000002,
+ 0x8526, 0x007ff800, 0x00200000,
+ 0x8057, 0xffffffff, 0x00000f40,
+ 0xc24d, 0xffffffff, 0x00000001
+};
+
+static const u32 spectre_mgcg_cgcg_init[] =
+{
+ 0x3108, 0xffffffff, 0xfffffffc,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf0a8, 0xffffffff, 0x00000100,
+ 0xf082, 0xffffffff, 0x00000100,
+ 0xf0b0, 0xffffffff, 0x00000100,
+ 0xf0b2, 0xffffffff, 0x00000100,
+ 0xf0b1, 0xffffffff, 0x00000100,
+ 0x1579, 0xffffffff, 0x00600100,
+ 0xf0a0, 0xffffffff, 0x00000100,
+ 0xf085, 0xffffffff, 0x06000100,
+ 0xf088, 0xffffffff, 0x00000100,
+ 0xf086, 0xffffffff, 0x06000100,
+ 0xf081, 0xffffffff, 0x00000100,
+ 0xf0b8, 0xffffffff, 0x00000100,
+ 0xf089, 0xffffffff, 0x00000100,
+ 0xf080, 0xffffffff, 0x00000100,
+ 0xf08c, 0xffffffff, 0x00000100,
+ 0xf08d, 0xffffffff, 0x00000100,
+ 0xf094, 0xffffffff, 0x00000100,
+ 0xf095, 0xffffffff, 0x00000100,
+ 0xf096, 0xffffffff, 0x00000100,
+ 0xf097, 0xffffffff, 0x00000100,
+ 0xf098, 0xffffffff, 0x00000100,
+ 0xf09f, 0xffffffff, 0x00000100,
+ 0xf09e, 0xffffffff, 0x00000100,
+ 0xf084, 0xffffffff, 0x06000100,
+ 0xf0a4, 0xffffffff, 0x00000100,
+ 0xf09d, 0xffffffff, 0x00000100,
+ 0xf0ad, 0xffffffff, 0x00000100,
+ 0xf0ac, 0xffffffff, 0x00000100,
+ 0xf09c, 0xffffffff, 0x00000100,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf008, 0xffffffff, 0x00010000,
+ 0xf009, 0xffffffff, 0x00030002,
+ 0xf00a, 0xffffffff, 0x00040007,
+ 0xf00b, 0xffffffff, 0x00060005,
+ 0xf00c, 0xffffffff, 0x00090008,
+ 0xf00d, 0xffffffff, 0x00010000,
+ 0xf00e, 0xffffffff, 0x00030002,
+ 0xf00f, 0xffffffff, 0x00040007,
+ 0xf010, 0xffffffff, 0x00060005,
+ 0xf011, 0xffffffff, 0x00090008,
+ 0xf012, 0xffffffff, 0x00010000,
+ 0xf013, 0xffffffff, 0x00030002,
+ 0xf014, 0xffffffff, 0x00040007,
+ 0xf015, 0xffffffff, 0x00060005,
+ 0xf016, 0xffffffff, 0x00090008,
+ 0xf017, 0xffffffff, 0x00010000,
+ 0xf018, 0xffffffff, 0x00030002,
+ 0xf019, 0xffffffff, 0x00040007,
+ 0xf01a, 0xffffffff, 0x00060005,
+ 0xf01b, 0xffffffff, 0x00090008,
+ 0xf01c, 0xffffffff, 0x00010000,
+ 0xf01d, 0xffffffff, 0x00030002,
+ 0xf01e, 0xffffffff, 0x00040007,
+ 0xf01f, 0xffffffff, 0x00060005,
+ 0xf020, 0xffffffff, 0x00090008,
+ 0xf021, 0xffffffff, 0x00010000,
+ 0xf022, 0xffffffff, 0x00030002,
+ 0xf023, 0xffffffff, 0x00040007,
+ 0xf024, 0xffffffff, 0x00060005,
+ 0xf025, 0xffffffff, 0x00090008,
+ 0xf026, 0xffffffff, 0x00010000,
+ 0xf027, 0xffffffff, 0x00030002,
+ 0xf028, 0xffffffff, 0x00040007,
+ 0xf029, 0xffffffff, 0x00060005,
+ 0xf02a, 0xffffffff, 0x00090008,
+ 0xf02b, 0xffffffff, 0x00010000,
+ 0xf02c, 0xffffffff, 0x00030002,
+ 0xf02d, 0xffffffff, 0x00040007,
+ 0xf02e, 0xffffffff, 0x00060005,
+ 0xf02f, 0xffffffff, 0x00090008,
+ 0xf000, 0xffffffff, 0x96e00200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x3109, 0xffffffff, 0x0020003f,
+ 0xe, 0xffffffff, 0x0140001c,
+ 0xf, 0x000f0000, 0x000f0000,
+ 0x88, 0xffffffff, 0xc060000c,
+ 0x89, 0xc0000fff, 0x00000100,
+ 0x3e4, 0xffffffff, 0x00000100,
+ 0x3e6, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3403, 0xff000ff0, 0x00000100,
+ 0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 kalindi_golden_spm_registers[] =
+{
+ 0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 kalindi_golden_common_registers[] =
+{
+ 0x31dc, 0xffffffff, 0x00000800,
+ 0x31dd, 0xffffffff, 0x00000800,
+ 0x31e6, 0xffffffff, 0x00007fbf,
+ 0x31e7, 0xffffffff, 0x00007faf
+};
+
+static const u32 kalindi_golden_registers[] =
+{
+ 0xf000, 0xffffdfff, 0x6e944040,
+ 0x1579, 0xff607fff, 0xfc000100,
+ 0xf088, 0xff000fff, 0x00000100,
+ 0xf089, 0xff000fff, 0x00000100,
+ 0xf080, 0xfffc0fff, 0x00000100,
+ 0x1bb6, 0x00010101, 0x00010000,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0xf0311fff, 0x80300000,
+ 0x263e, 0x73773777, 0x12010001,
+ 0x263f, 0xffffffff, 0x00000010,
+ 0x26df, 0x00ff0000, 0x00fc0000,
+ 0x200c, 0x00001f0f, 0x0000100a,
+ 0xbd2, 0x73773777, 0x12010001,
+ 0x902, 0x000fffff, 0x000c007f,
+ 0x2285, 0xf000003f, 0x00000007,
+ 0x22c9, 0x3fff3fff, 0x00ffcfff,
+ 0xc281, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x06000000,
+ 0x136, 0x00000fff, 0x00000100,
+ 0xf9e, 0x00000001, 0x00000002,
+ 0x31da, 0x00000008, 0x00000008,
+ 0x2300, 0x000000ff, 0x00000003,
+ 0x853e, 0x01ff01ff, 0x00000002,
+ 0x8526, 0x007ff800, 0x00200000,
+ 0x8057, 0xffffffff, 0x00000f40,
+ 0x2231, 0x001f3ae3, 0x00000082,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0xc24d, 0xffffffff, 0x00000000
+};
+
+static const u32 kalindi_mgcg_cgcg_init[] =
+{
+ 0x3108, 0xffffffff, 0xfffffffc,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf0a8, 0xffffffff, 0x00000100,
+ 0xf082, 0xffffffff, 0x00000100,
+ 0xf0b0, 0xffffffff, 0x00000100,
+ 0xf0b2, 0xffffffff, 0x00000100,
+ 0xf0b1, 0xffffffff, 0x00000100,
+ 0x1579, 0xffffffff, 0x00600100,
+ 0xf0a0, 0xffffffff, 0x00000100,
+ 0xf085, 0xffffffff, 0x06000100,
+ 0xf088, 0xffffffff, 0x00000100,
+ 0xf086, 0xffffffff, 0x06000100,
+ 0xf081, 0xffffffff, 0x00000100,
+ 0xf0b8, 0xffffffff, 0x00000100,
+ 0xf089, 0xffffffff, 0x00000100,
+ 0xf080, 0xffffffff, 0x00000100,
+ 0xf08c, 0xffffffff, 0x00000100,
+ 0xf08d, 0xffffffff, 0x00000100,
+ 0xf094, 0xffffffff, 0x00000100,
+ 0xf095, 0xffffffff, 0x00000100,
+ 0xf096, 0xffffffff, 0x00000100,
+ 0xf097, 0xffffffff, 0x00000100,
+ 0xf098, 0xffffffff, 0x00000100,
+ 0xf09f, 0xffffffff, 0x00000100,
+ 0xf09e, 0xffffffff, 0x00000100,
+ 0xf084, 0xffffffff, 0x06000100,
+ 0xf0a4, 0xffffffff, 0x00000100,
+ 0xf09d, 0xffffffff, 0x00000100,
+ 0xf0ad, 0xffffffff, 0x00000100,
+ 0xf0ac, 0xffffffff, 0x00000100,
+ 0xf09c, 0xffffffff, 0x00000100,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf008, 0xffffffff, 0x00010000,
+ 0xf009, 0xffffffff, 0x00030002,
+ 0xf00a, 0xffffffff, 0x00040007,
+ 0xf00b, 0xffffffff, 0x00060005,
+ 0xf00c, 0xffffffff, 0x00090008,
+ 0xf00d, 0xffffffff, 0x00010000,
+ 0xf00e, 0xffffffff, 0x00030002,
+ 0xf00f, 0xffffffff, 0x00040007,
+ 0xf010, 0xffffffff, 0x00060005,
+ 0xf011, 0xffffffff, 0x00090008,
+ 0xf000, 0xffffffff, 0x96e00200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x3109, 0xffffffff, 0x0020003f,
+ 0xe, 0xffffffff, 0x0140001c,
+ 0xf, 0x000f0000, 0x000f0000,
+ 0x88, 0xffffffff, 0xc060000c,
+ 0x89, 0xc0000fff, 0x00000100,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3403, 0xff000ff0, 0x00000100,
+ 0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 hawaii_golden_spm_registers[] =
+{
+ 0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 hawaii_golden_common_registers[] =
+{
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xa0d4, 0xffffffff, 0x3a00161a,
+ 0xa0d5, 0xffffffff, 0x0000002e,
+ 0x2684, 0xffffffff, 0x00018208,
+ 0x263e, 0xffffffff, 0x12011003
+};
+
+static const u32 hawaii_golden_registers[] =
+{
+ 0xcd5, 0x00000333, 0x00000333,
+ 0x2684, 0x00010000, 0x00058208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x31e, 0x00000080, 0x00000000,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0xf0311fff, 0x80300000,
+ 0xd43, 0x00810000, 0x408af000,
+ 0x1c0c, 0x31000111, 0x00000011,
+ 0xbd2, 0x73773777, 0x12010001,
+ 0x848, 0x0000007f, 0x0000001b,
+ 0x877, 0x00007fb6, 0x00002191,
+ 0xd8a, 0x0000003f, 0x0000000a,
+ 0xd8b, 0x0000003f, 0x0000000a,
+ 0xab9, 0x00073ffe, 0x000022a2,
+ 0x903, 0x000007ff, 0x00000000,
+ 0x22fc, 0x00002001, 0x00000001,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0xc281, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x06000000,
+ 0xf9e, 0x00000001, 0x00000002,
+ 0x31da, 0x00000008, 0x00000008,
+ 0x31dc, 0x00000f00, 0x00000800,
+ 0x31dd, 0x00000f00, 0x00000800,
+ 0x31e6, 0x00ffffff, 0x00ff7fbf,
+ 0x31e7, 0x00ffffff, 0x00ff7faf,
+ 0x2300, 0x000000ff, 0x00000800,
+ 0x390, 0x00001fff, 0x00001fff,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b80, 0x00100000, 0x000ff07c,
+ 0x2b05, 0x000003ff, 0x0000000f,
+ 0x2b04, 0xffffffff, 0x7564fdec,
+ 0x2b03, 0xffffffff, 0x3120b9a8,
+ 0x2b02, 0x20000000, 0x0f9c0000
+};
+
+static const u32 hawaii_mgcg_cgcg_init[] =
+{
+ 0x3108, 0xffffffff, 0xfffffffd,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf0a8, 0xffffffff, 0x00000100,
+ 0xf082, 0xffffffff, 0x00000100,
+ 0xf0b0, 0xffffffff, 0x00000100,
+ 0xf0b2, 0xffffffff, 0x00000100,
+ 0xf0b1, 0xffffffff, 0x00000100,
+ 0x1579, 0xffffffff, 0x00200100,
+ 0xf0a0, 0xffffffff, 0x00000100,
+ 0xf085, 0xffffffff, 0x06000100,
+ 0xf088, 0xffffffff, 0x00000100,
+ 0xf086, 0xffffffff, 0x06000100,
+ 0xf081, 0xffffffff, 0x00000100,
+ 0xf0b8, 0xffffffff, 0x00000100,
+ 0xf089, 0xffffffff, 0x00000100,
+ 0xf080, 0xffffffff, 0x00000100,
+ 0xf08c, 0xffffffff, 0x00000100,
+ 0xf08d, 0xffffffff, 0x00000100,
+ 0xf094, 0xffffffff, 0x00000100,
+ 0xf095, 0xffffffff, 0x00000100,
+ 0xf096, 0xffffffff, 0x00000100,
+ 0xf097, 0xffffffff, 0x00000100,
+ 0xf098, 0xffffffff, 0x00000100,
+ 0xf09f, 0xffffffff, 0x00000100,
+ 0xf09e, 0xffffffff, 0x00000100,
+ 0xf084, 0xffffffff, 0x06000100,
+ 0xf0a4, 0xffffffff, 0x00000100,
+ 0xf09d, 0xffffffff, 0x00000100,
+ 0xf0ad, 0xffffffff, 0x00000100,
+ 0xf0ac, 0xffffffff, 0x00000100,
+ 0xf09c, 0xffffffff, 0x00000100,
+ 0xc200, 0xffffffff, 0xe0000000,
+ 0xf008, 0xffffffff, 0x00010000,
+ 0xf009, 0xffffffff, 0x00030002,
+ 0xf00a, 0xffffffff, 0x00040007,
+ 0xf00b, 0xffffffff, 0x00060005,
+ 0xf00c, 0xffffffff, 0x00090008,
+ 0xf00d, 0xffffffff, 0x00010000,
+ 0xf00e, 0xffffffff, 0x00030002,
+ 0xf00f, 0xffffffff, 0x00040007,
+ 0xf010, 0xffffffff, 0x00060005,
+ 0xf011, 0xffffffff, 0x00090008,
+ 0xf012, 0xffffffff, 0x00010000,
+ 0xf013, 0xffffffff, 0x00030002,
+ 0xf014, 0xffffffff, 0x00040007,
+ 0xf015, 0xffffffff, 0x00060005,
+ 0xf016, 0xffffffff, 0x00090008,
+ 0xf017, 0xffffffff, 0x00010000,
+ 0xf018, 0xffffffff, 0x00030002,
+ 0xf019, 0xffffffff, 0x00040007,
+ 0xf01a, 0xffffffff, 0x00060005,
+ 0xf01b, 0xffffffff, 0x00090008,
+ 0xf01c, 0xffffffff, 0x00010000,
+ 0xf01d, 0xffffffff, 0x00030002,
+ 0xf01e, 0xffffffff, 0x00040007,
+ 0xf01f, 0xffffffff, 0x00060005,
+ 0xf020, 0xffffffff, 0x00090008,
+ 0xf021, 0xffffffff, 0x00010000,
+ 0xf022, 0xffffffff, 0x00030002,
+ 0xf023, 0xffffffff, 0x00040007,
+ 0xf024, 0xffffffff, 0x00060005,
+ 0xf025, 0xffffffff, 0x00090008,
+ 0xf026, 0xffffffff, 0x00010000,
+ 0xf027, 0xffffffff, 0x00030002,
+ 0xf028, 0xffffffff, 0x00040007,
+ 0xf029, 0xffffffff, 0x00060005,
+ 0xf02a, 0xffffffff, 0x00090008,
+ 0xf02b, 0xffffffff, 0x00010000,
+ 0xf02c, 0xffffffff, 0x00030002,
+ 0xf02d, 0xffffffff, 0x00040007,
+ 0xf02e, 0xffffffff, 0x00060005,
+ 0xf02f, 0xffffffff, 0x00090008,
+ 0xf030, 0xffffffff, 0x00010000,
+ 0xf031, 0xffffffff, 0x00030002,
+ 0xf032, 0xffffffff, 0x00040007,
+ 0xf033, 0xffffffff, 0x00060005,
+ 0xf034, 0xffffffff, 0x00090008,
+ 0xf035, 0xffffffff, 0x00010000,
+ 0xf036, 0xffffffff, 0x00030002,
+ 0xf037, 0xffffffff, 0x00040007,
+ 0xf038, 0xffffffff, 0x00060005,
+ 0xf039, 0xffffffff, 0x00090008,
+ 0xf03a, 0xffffffff, 0x00010000,
+ 0xf03b, 0xffffffff, 0x00030002,
+ 0xf03c, 0xffffffff, 0x00040007,
+ 0xf03d, 0xffffffff, 0x00060005,
+ 0xf03e, 0xffffffff, 0x00090008,
+ 0x30c6, 0xffffffff, 0x00020200,
+ 0xcd4, 0xffffffff, 0x00000200,
+ 0x570, 0xffffffff, 0x00000400,
+ 0x157a, 0xffffffff, 0x00000000,
+ 0xbd4, 0xffffffff, 0x00000902,
+ 0xf000, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x3109, 0xffffffff, 0x0020003f,
+ 0xe, 0xffffffff, 0x0140001c,
+ 0xf, 0x000f0000, 0x000f0000,
+ 0x88, 0xffffffff, 0xc060000c,
+ 0x89, 0xc0000fff, 0x00000100,
+ 0x3e4, 0xffffffff, 0x00000100,
+ 0x3e6, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3403, 0xff000ff0, 0x00000100,
+ 0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 godavari_golden_registers[] =
+{
+ 0x1579, 0xff607fff, 0xfc000100,
+ 0x1bb6, 0x00010101, 0x00010000,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260c0, 0xf00fffff, 0x00000400,
+ 0x184c, 0xffffffff, 0x00010000,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0xf0311fff, 0x80300000,
+ 0x263e, 0x73773777, 0x12010001,
+ 0x263f, 0xffffffff, 0x00000010,
+ 0x200c, 0x00001f0f, 0x0000100a,
+ 0xbd2, 0x73773777, 0x12010001,
+ 0x902, 0x000fffff, 0x000c007f,
+ 0x2285, 0xf000003f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ff0fff,
+ 0xc281, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x06000000,
+ 0x136, 0x00000fff, 0x00000100,
+ 0x3405, 0x00010000, 0x00810001,
+ 0x3605, 0x00010000, 0x00810001,
+ 0xf9e, 0x00000001, 0x00000002,
+ 0x31da, 0x00000008, 0x00000008,
+ 0x31dc, 0x00000f00, 0x00000800,
+ 0x31dd, 0x00000f00, 0x00000800,
+ 0x31e6, 0x00ffffff, 0x00ff7fbf,
+ 0x31e7, 0x00ffffff, 0x00ff7faf,
+ 0x2300, 0x000000ff, 0x00000001,
+ 0x853e, 0x01ff01ff, 0x00000002,
+ 0x8526, 0x007ff800, 0x00200000,
+ 0x8057, 0xffffffff, 0x00000f40,
+ 0x2231, 0x001f3ae3, 0x00000082,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0xc24d, 0xffffffff, 0x00000000
+};
+
+static void cik_init_golden_registers(struct amdgpu_device *adev)
+{
+ /* Some of the registers might be dependent on GRBM_GFX_INDEX */
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_program_register_sequence(adev,
+ bonaire_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ bonaire_golden_registers,
+ (const u32)ARRAY_SIZE(bonaire_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ bonaire_golden_common_registers,
+ (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
+ amdgpu_program_register_sequence(adev,
+ bonaire_golden_spm_registers,
+ (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+ break;
+ case CHIP_KABINI:
+ amdgpu_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ kalindi_golden_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ break;
+ case CHIP_MULLINS:
+ amdgpu_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ godavari_golden_registers,
+ (const u32)ARRAY_SIZE(godavari_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ break;
+ case CHIP_KAVERI:
+ amdgpu_program_register_sequence(adev,
+ spectre_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ spectre_golden_registers,
+ (const u32)ARRAY_SIZE(spectre_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ spectre_golden_common_registers,
+ (const u32)ARRAY_SIZE(spectre_golden_common_registers));
+ amdgpu_program_register_sequence(adev,
+ spectre_golden_spm_registers,
+ (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+ break;
+ case CHIP_HAWAII:
+ amdgpu_program_register_sequence(adev,
+ hawaii_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ hawaii_golden_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ hawaii_golden_common_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+ amdgpu_program_register_sequence(adev,
+ hawaii_golden_spm_registers,
+ (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/**
+ * cik_get_xclk - get the xclk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (CIK).
+ */
+static u32 cik_get_xclk(struct amdgpu_device *adev)
+{
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
+ if (adev->flags & AMDGPU_IS_APU) {
+ if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
+ return reference_clock / 2;
+ } else {
+ if (RREG32_SMC(ixCG_CLKPIN_CNTL) & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
+ return reference_clock / 4;
+ }
+ return reference_clock;
+}
+
+/**
+ * cik_srbm_select - select specific register instances
+ *
+ * @adev: amdgpu_device pointer
+ * @me: selected ME (micro engine)
+ * @pipe: pipe
+ * @queue: queue
+ * @vmid: VMID
+ *
+ * Switches the currently active register instances. Some
+ * registers are instanced per VMID, others are instanced per
+ * me/pipe/queue combination.
+ */
+void cik_srbm_select(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 queue, u32 vmid)
+{
+ u32 srbm_gfx_cntl =
+ (((pipe << SRBM_GFX_CNTL__PIPEID__SHIFT) & SRBM_GFX_CNTL__PIPEID_MASK)|
+ ((me << SRBM_GFX_CNTL__MEID__SHIFT) & SRBM_GFX_CNTL__MEID_MASK)|
+ ((vmid << SRBM_GFX_CNTL__VMID__SHIFT) & SRBM_GFX_CNTL__VMID_MASK)|
+ ((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK));
+ WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
+}
+
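+/* Enable or disable legacy VGA decode (state == true enables it). */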
+static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+ uint32_t tmp;
+
+ tmp = RREG32(mmCONFIG_CNTL);
+ if (state)
+ tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
+ else
+ tmp |= CONFIG_CNTL__VGA_DIS_MASK;
+ WREG32(mmCONFIG_CNTL, tmp);
+}
+
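+/*
+ * Fetch the video BIOS while the ROM is disabled: temporarily enable
+ * the ROM and turn off VGA access, read the image, then restore the
+ * saved register state.
+ */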
+static bool cik_read_disabled_bios(struct amdgpu_device *adev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control = 0;
+ u32 d2vga_control = 0;
+ u32 vga_render_control = 0;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(mmBUS_CNTL);
+ if (adev->mode_info.num_crtc) {
+ d1vga_control = RREG32(mmD1VGA_CONTROL);
+ d2vga_control = RREG32(mmD2VGA_CONTROL);
+ vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+ }
+ rom_cntl = RREG32_SMC(ixROM_CNTL);
+
+ /* enable the rom */
+ WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
+ if (adev->mode_info.num_crtc) {
+ /* Disable VGA mode */
+ WREG32(mmD1VGA_CONTROL,
+ (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmD2VGA_CONTROL,
+ (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmVGA_RENDER_CONTROL,
+ (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
+ }
+ WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
+
+ r = amdgpu_read_bios(adev);
+
+ /* restore regs */
+ WREG32(mmBUS_CNTL, bus_cntl);
+ if (adev->mode_info.num_crtc) {
+ WREG32(mmD1VGA_CONTROL, d1vga_control);
+ WREG32(mmD2VGA_CONTROL, d2vga_control);
+ WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
+ }
+ WREG32_SMC(ixROM_CNTL, rom_cntl);
+ return r;
+}
+
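+/*
+ * Allow-list served by cik_read_register(). "untouched" entries are
+ * reported as zero rather than read from hardware; "grbm_indexed"
+ * entries are instanced per SE/SH and read via GRBM_GFX_INDEX.
+ */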
+static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
+ {mmGRBM_STATUS, false},
+ {mmGB_ADDR_CONFIG, false},
+ {mmMC_ARB_RAMCFG, false},
+ {mmGB_TILE_MODE0, false},
+ {mmGB_TILE_MODE1, false},
+ {mmGB_TILE_MODE2, false},
+ {mmGB_TILE_MODE3, false},
+ {mmGB_TILE_MODE4, false},
+ {mmGB_TILE_MODE5, false},
+ {mmGB_TILE_MODE6, false},
+ {mmGB_TILE_MODE7, false},
+ {mmGB_TILE_MODE8, false},
+ {mmGB_TILE_MODE9, false},
+ {mmGB_TILE_MODE10, false},
+ {mmGB_TILE_MODE11, false},
+ {mmGB_TILE_MODE12, false},
+ {mmGB_TILE_MODE13, false},
+ {mmGB_TILE_MODE14, false},
+ {mmGB_TILE_MODE15, false},
+ {mmGB_TILE_MODE16, false},
+ {mmGB_TILE_MODE17, false},
+ {mmGB_TILE_MODE18, false},
+ {mmGB_TILE_MODE19, false},
+ {mmGB_TILE_MODE20, false},
+ {mmGB_TILE_MODE21, false},
+ {mmGB_TILE_MODE22, false},
+ {mmGB_TILE_MODE23, false},
+ {mmGB_TILE_MODE24, false},
+ {mmGB_TILE_MODE25, false},
+ {mmGB_TILE_MODE26, false},
+ {mmGB_TILE_MODE27, false},
+ {mmGB_TILE_MODE28, false},
+ {mmGB_TILE_MODE29, false},
+ {mmGB_TILE_MODE30, false},
+ {mmGB_TILE_MODE31, false},
+ {mmGB_MACROTILE_MODE0, false},
+ {mmGB_MACROTILE_MODE1, false},
+ {mmGB_MACROTILE_MODE2, false},
+ {mmGB_MACROTILE_MODE3, false},
+ {mmGB_MACROTILE_MODE4, false},
+ {mmGB_MACROTILE_MODE5, false},
+ {mmGB_MACROTILE_MODE6, false},
+ {mmGB_MACROTILE_MODE7, false},
+ {mmGB_MACROTILE_MODE8, false},
+ {mmGB_MACROTILE_MODE9, false},
+ {mmGB_MACROTILE_MODE10, false},
+ {mmGB_MACROTILE_MODE11, false},
+ {mmGB_MACROTILE_MODE12, false},
+ {mmGB_MACROTILE_MODE13, false},
+ {mmGB_MACROTILE_MODE14, false},
+ {mmGB_MACROTILE_MODE15, false},
+ {mmCC_RB_BACKEND_DISABLE, false, true},
+ {mmGC_USER_RB_BACKEND_DISABLE, false, true},
+ {mmGB_BACKEND_MAP, false, false},
+ {mmPA_SC_RASTER_CONFIG, false, true},
+ {mmPA_SC_RASTER_CONFIG_1, false, true},
+};
+
+static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num,
+ u32 reg_offset)
+{
+ uint32_t val;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+
+ val = RREG32(reg_offset);
+
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+}
+
+static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 reg_offset, u32 *value)
+{
+ uint32_t i;
+
+ *value = 0;
+ for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+ if (reg_offset != cik_allowed_read_registers[i].reg_offset)
+ continue;
+
+ if (!cik_allowed_read_registers[i].untouched)
+ *value = cik_allowed_read_registers[i].grbm_indexed ?
+ cik_read_indexed_register(adev, se_num,
+ sh_num, reg_offset) :
+ RREG32(reg_offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void cik_print_gpu_status_regs(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(mmGRBM_STATUS));
+ dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
+ RREG32(mmGRBM_STATUS2));
+ dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE0));
+ dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE1));
+ dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE2));
+ dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE3));
+ dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(mmSRBM_STATUS));
+ dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
+ RREG32(mmSRBM_STATUS2));
+ dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
+ RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
+ dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
+ RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+ dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
+ dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
+ RREG32(mmCP_STALLED_STAT1));
+ dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
+ RREG32(mmCP_STALLED_STAT2));
+ dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
+ RREG32(mmCP_STALLED_STAT3));
+ dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
+ RREG32(mmCP_CPF_BUSY_STAT));
+ dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
+ RREG32(mmCP_CPF_STALLED_STAT1));
+ dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
+ dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
+ dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
+ RREG32(mmCP_CPC_STALLED_STAT1));
+ dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
+}
+
+/**
+ * amdgpu_cik_gpu_check_soft_reset - check which blocks are busy
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check which blocks are busy and return the relevant reset
+ * mask to be used by cik_gpu_soft_reset().
+ * Returns a mask of the blocks to be reset.
+ */
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(mmGRBM_STATUS);
+ if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+ GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+ GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+ GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+ GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
+ reset_mask |= AMDGPU_RESET_GFX;
+
+ if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
+ reset_mask |= AMDGPU_RESET_CP;
+
+ /* GRBM_STATUS2 */
+ tmp = RREG32(mmGRBM_STATUS2);
+ if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
+ reset_mask |= AMDGPU_RESET_RLC;
+
+ /* SDMA0_STATUS_REG */
+ tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
+ if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+ reset_mask |= AMDGPU_RESET_DMA;
+
+ /* SDMA1_STATUS_REG */
+ tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
+ if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+ reset_mask |= AMDGPU_RESET_DMA1;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(mmSRBM_STATUS2);
+ if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
+ reset_mask |= AMDGPU_RESET_DMA;
+
+ if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
+ reset_mask |= AMDGPU_RESET_DMA1;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(mmSRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ reset_mask |= AMDGPU_RESET_IH;
+
+ if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
+ reset_mask |= AMDGPU_RESET_SEM;
+
+ if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
+ reset_mask |= AMDGPU_RESET_GRBM;
+
+ if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+ reset_mask |= AMDGPU_RESET_VMC;
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
+ reset_mask |= AMDGPU_RESET_MC;
+
+ if (amdgpu_display_is_display_hung(adev))
+ reset_mask |= AMDGPU_RESET_DISPLAY;
+
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & AMDGPU_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~AMDGPU_RESET_MC;
+ }
+
+ return reset_mask;
+}
+
+/**
+ * cik_gpu_soft_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @reset_mask: mask of which blocks to reset
+ *
+ * Soft reset the blocks specified in @reset_mask.
+ */
+static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
+{
+ struct amdgpu_mode_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ cik_print_gpu_status_regs(adev);
+ dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+ /* disable CG/PG */
+
+ /* stop the rlc */
+ gfx_v7_0_rlc_stop(adev);
+
+ /* Disable GFX parsing/prefetching */
+ WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
+
+ /* Disable MEC parsing/prefetching */
+ WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
+
+ if (reset_mask & AMDGPU_RESET_DMA) {
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ }
+ if (reset_mask & AMDGPU_RESET_DMA1) {
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ }
+
+ gmc_v7_0_mc_stop(adev, &save);
+ if (amdgpu_asic_wait_for_mc_idle(adev))
+ dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+ if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP))
+ grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
+ GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
+
+ if (reset_mask & AMDGPU_RESET_CP) {
+ grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
+
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
+ }
+
+ if (reset_mask & AMDGPU_RESET_DMA)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ if (reset_mask & AMDGPU_RESET_DMA1)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+
+ if (reset_mask & AMDGPU_RESET_DISPLAY)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (reset_mask & AMDGPU_RESET_RLC)
+ grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
+
+ if (reset_mask & AMDGPU_RESET_SEM)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK;
+
+ if (reset_mask & AMDGPU_RESET_IH)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+ if (reset_mask & AMDGPU_RESET_GRBM)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
+
+ if (reset_mask & AMDGPU_RESET_VMC)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
+
+ if (!(adev->flags & AMDGPU_IS_APU)) {
+ if (reset_mask & AMDGPU_RESET_MC)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ gmc_v7_0_mc_resume(adev, &save);
+ udelay(50);
+
+ cik_print_gpu_status_regs(adev);
+}
+
+struct kv_reset_save_regs {
+ u32 gmcon_reng_execute;
+ u32 gmcon_misc;
+ u32 gmcon_misc3;
+};
+
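+/*
+ * GMCON (memory controller power gating) state does not appear to
+ * survive a PCI config reset on APUs, so it is saved here and the
+ * restore path below replays the PGFSM sequences before writing the
+ * saved registers back.
+ */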
+static void kv_save_regs_for_reset(struct amdgpu_device *adev,
+ struct kv_reset_save_regs *save)
+{
+ save->gmcon_reng_execute = RREG32(mmGMCON_RENG_EXECUTE);
+ save->gmcon_misc = RREG32(mmGMCON_MISC);
+ save->gmcon_misc3 = RREG32(mmGMCON_MISC3);
+
+ WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute &
+ ~GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK);
+ WREG32(mmGMCON_MISC, save->gmcon_misc &
+ ~(GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK |
+ GMCON_MISC__STCTRL_STUTTER_EN_MASK));
+}
+
+static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
+ struct kv_reset_save_regs *save)
+{
+ int i;
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0x200010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0x300010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x210000);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0xa00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x21003);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0xb00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x2b00);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0xc00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0xd00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x420000);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0x100010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x120202);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0x500010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x3e3e36);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0x600010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x373f3e);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0x700010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+ WREG32(mmGMCON_PGFSM_WRITE, 0x3e1332);
+ WREG32(mmGMCON_PGFSM_CONFIG, 0xe00010ff);
+
+ WREG32(mmGMCON_MISC3, save->gmcon_misc3);
+ WREG32(mmGMCON_MISC, save->gmcon_misc);
+ WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
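+/*
+ * Full PCI config reset: halt the CP, MEC and SDMA engines, stop MC
+ * access, save APU-only state, trigger the reset through PCI config
+ * space, then wait for mmCONFIG_MEMSIZE to become readable again.
+ */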
+static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_mc_save save;
+ struct kv_reset_save_regs kv_save = { 0 };
+ u32 tmp, i;
+
+ dev_info(adev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* disable cg/pg */
+
+ /* Disable GFX parsing/prefetching */
+ WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
+ CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
+
+ /* Disable MEC parsing/prefetching */
+ WREG32(mmCP_MEC_CNTL,
+ CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
+
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc, disable cp internal ints */
+ gfx_v7_0_rlc_stop(adev);
+
+ udelay(50);
+
+ /* disable mem access */
+ gmc_v7_0_mc_stop(adev, &save);
+ if (amdgpu_asic_wait_for_mc_idle(adev))
+ dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+ if (adev->flags & AMDGPU_IS_APU)
+ kv_save_regs_for_reset(adev, &kv_save);
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ /* reset */
+ amdgpu_pci_config_reset(adev);
+
+ udelay(100);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ /* does asic init need to be run first??? */
+ if (adev->flags & AMDGPU_IS_APU)
+ kv_restore_regs_for_reset(adev, &kv_save);
+}
+
+static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
+{
+ u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+ u32 reset_mask;
+
+ reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+
+ if (reset_mask)
+ cik_set_bios_scratch_engine_hung(adev, true);
+
+ /* try soft reset */
+ cik_gpu_soft_reset(adev, reset_mask);
+
+ reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+
+ /* try pci config reset */
+ if (reset_mask && amdgpu_hard_reset)
+ cik_gpu_pci_config_reset(adev);
+
+ reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+
+ if (!reset_mask)
+ cik_set_bios_scratch_engine_hung(adev, false);
+
+ return 0;
+}
+
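+/*
+ * Program one UVD clock: look up the post divider for the requested
+ * frequency via atombios, write it to @cntl_reg and poll @status_reg
+ * until the divider change is reported as complete.
+ */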
+static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
+ u32 cntl_reg, u32 status_reg)
+{
+ int r, i;
+ struct atom_clock_dividers dividers;
+ uint32_t tmp;
+
+ r = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ clock, false, &dividers);
+ if (r)
+ return r;
+
+ tmp = RREG32_SMC(cntl_reg);
+ tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+ CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+ tmp |= dividers.post_divider;
+ WREG32_SMC(cntl_reg, tmp);
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int cik_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+ int r;
+
+ r = cik_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+ if (r)
+ return r;
+
+ r = cik_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+ return r;
+}
+
+static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+ int r, i;
+ struct atom_clock_dividers dividers;
+ u32 tmp;
+
+ r = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ ecclk, false, &dividers);
+ if (r)
+ return r;
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ tmp = RREG32_SMC(ixCG_ECLK_CNTL);
+ tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
+ CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
+ tmp |= dividers.post_divider;
+ WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
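+/*
+ * Attempt to raise the PCIe link to gen2/gen3 rates when the root
+ * port supports them: redo link equalization if needed, request the
+ * target speed in LNKCTL2 and wait for the speed change to complete.
+ */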
+static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+ struct pci_dev *root = adev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+ u32 speed_cntl, mask, current_data_rate;
+ int ret, i;
+ u16 tmp16;
+
+ if (amdgpu_pcie_gen2 == 0)
+ return;
+
+ if (adev->flags & AMDGPU_IS_APU)
+ return;
+
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ return;
+
+ speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ } else if (mask & DRM_PCIE_SPEED_50) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ }
+
+ bridge_pos = pci_pcie_cap(root);
+ if (!bridge_pos)
+ return;
+
+ gpu_pos = pci_pcie_cap(adev->pdev);
+ if (!gpu_pos)
+ return;
+
+ if (mask & DRM_PCIE_SPEED_80) {
+ /* re-try equalization if gen3 is not already enabled */
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+ u32 max_lw, current_lw, tmp;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
+ PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+ current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK)
+ >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
+
+ if (current_lw < max_lw) {
+ tmp = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+ tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+ tmp |= (max_lw <<
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+ tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+ WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
+ }
+ }
+
+ for (i = 0; i < 10; i++) {
+ /* check status */
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+ break;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+ tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+
+ tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+
+ mdelay(100);
+
+ /* linkctl */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ /* linkctl2 - restore the saved bits (bit 4, bits 11:9) */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
+ tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+ }
+ }
+ }
+
+ /* set the link speed */
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK |
+ PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+ speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+ WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+ if (mask & DRM_PCIE_SPEED_80)
+ tmp16 |= 3; /* gen3 */
+ else if (mask & DRM_PCIE_SPEED_50)
+ tmp16 |= 2; /* gen2 */
+ else
+ tmp16 |= 1; /* gen1 */
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+ WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
+ break;
+ udelay(1);
+ }
+}
+
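+/*
+ * Configure PCIe ASPM (L0s/L1 link power states). The disable_*
+ * locals are hooks for turning individual states off; CLKREQ-based
+ * power-down is only enabled when the root port advertises clock
+ * power management in LNKCAP.
+ */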
+static void cik_program_aspm(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+ bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+ bool disable_clkreq = false;
+
+ if (amdgpu_aspm == 0)
+ return;
+
+ /* XXX double check APUs */
+ if (adev->flags & AMDGPU_IS_APU)
+ return;
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
+ PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK |
+ PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (!disable_l0s)
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
+
+ if (!disable_l1) {
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+
+ if (!disable_plloff_in_l1) {
+ bool clk_req_support;
+
+ orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
+ PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
+ (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPB0_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
+ PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
+ (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPB0_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
+ PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
+ (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPB1_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
+ PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
+ (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPB1_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= ~(3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
+
+ if (!disable_clkreq) {
+ struct pci_dev *root = adev->pdev->bus->self;
+ u32 lnkcap;
+
+ clk_req_support = false;
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+ clk_req_support = true;
+ } else {
+ clk_req_support = false;
+ }
+
+ if (clk_req_support) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL2, data);
+
+ orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK |
+ THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
+ (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixTHM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
+ data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
+ MISC_CLK_CTRL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
+ (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
+ }
+ }
+ } else {
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CNTL2, data);
+
+ if (!disable_l0s) {
+ data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) ==
+ PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) &&
+ (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+ }
+ }
+}
+
+static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
+{
+ return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
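+/*
+ * Per-ASIC IP block lists: each entry names a hardware block, its
+ * version and the function table driving it. The blocks are brought
+ * up in exactly this order, hence the ORDER MATTERS notes below.
+ */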
+static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
+static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
+static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
+static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
+static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
+ if (adev->ip_block_enabled == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct amdgpu_asic_funcs cik_asic_funcs =
+{
+ .read_disabled_bios = &cik_read_disabled_bios,
+ .read_register = &cik_read_register,
+ .reset = &cik_asic_reset,
+ .set_vga_state = &cik_vga_set_state,
+ .get_xclk = &cik_get_xclk,
+ .set_uvd_clocks = &cik_set_uvd_clocks,
+ .set_vce_clocks = &cik_set_vce_clocks,
+ .get_cu_info = &gfx_v7_0_get_cu_info,
+ /* these should be moved to their own ip modules */
+ .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+ .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+};
+
+static int cik_common_early_init(struct amdgpu_device *adev)
+{
+ adev->smc_rreg = &cik_smc_rreg;
+ adev->smc_wreg = &cik_smc_wreg;
+ adev->pcie_rreg = &cik_pcie_rreg;
+ adev->pcie_wreg = &cik_pcie_wreg;
+ adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
+ adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg;
+ adev->didt_rreg = &cik_didt_rreg;
+ adev->didt_wreg = &cik_didt_wreg;
+
+ adev->asic_funcs = &cik_asic_funcs;
+
+ adev->has_uvd = true;
+
+ adev->rev_id = cik_get_rev_id(adev);
+ adev->external_rev_id = 0xFF;
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->cg_flags =
+ AMDGPU_CG_SUPPORT_GFX_MGCG |
+ AMDGPU_CG_SUPPORT_GFX_MGLS |
+ /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+ AMDGPU_CG_SUPPORT_GFX_CGLS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
+ AMDGPU_CG_SUPPORT_GFX_CP_LS |
+ AMDGPU_CG_SUPPORT_MC_LS |
+ AMDGPU_CG_SUPPORT_MC_MGCG |
+ AMDGPU_CG_SUPPORT_SDMA_MGCG |
+ AMDGPU_CG_SUPPORT_SDMA_LS |
+ AMDGPU_CG_SUPPORT_BIF_LS |
+ AMDGPU_CG_SUPPORT_VCE_MGCG |
+ AMDGPU_CG_SUPPORT_UVD_MGCG |
+ AMDGPU_CG_SUPPORT_HDP_LS |
+ AMDGPU_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x14;
+ break;
+ case CHIP_HAWAII:
+ adev->cg_flags =
+ AMDGPU_CG_SUPPORT_GFX_MGCG |
+ AMDGPU_CG_SUPPORT_GFX_MGLS |
+ /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+ AMDGPU_CG_SUPPORT_GFX_CGLS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS |
+ AMDGPU_CG_SUPPORT_GFX_CP_LS |
+ AMDGPU_CG_SUPPORT_MC_LS |
+ AMDGPU_CG_SUPPORT_MC_MGCG |
+ AMDGPU_CG_SUPPORT_SDMA_MGCG |
+ AMDGPU_CG_SUPPORT_SDMA_LS |
+ AMDGPU_CG_SUPPORT_BIF_LS |
+ AMDGPU_CG_SUPPORT_VCE_MGCG |
+ AMDGPU_CG_SUPPORT_UVD_MGCG |
+ AMDGPU_CG_SUPPORT_HDP_LS |
+ AMDGPU_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = 0x28;
+ break;
+ case CHIP_KAVERI:
+ adev->cg_flags =
+ AMDGPU_CG_SUPPORT_GFX_MGCG |
+ AMDGPU_CG_SUPPORT_GFX_MGLS |
+ /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+ AMDGPU_CG_SUPPORT_GFX_CGLS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
+ AMDGPU_CG_SUPPORT_GFX_CP_LS |
+ AMDGPU_CG_SUPPORT_SDMA_MGCG |
+ AMDGPU_CG_SUPPORT_SDMA_LS |
+ AMDGPU_CG_SUPPORT_BIF_LS |
+ AMDGPU_CG_SUPPORT_VCE_MGCG |
+ AMDGPU_CG_SUPPORT_UVD_MGCG |
+ AMDGPU_CG_SUPPORT_HDP_LS |
+ AMDGPU_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags =
+ /*AMDGPU_PG_SUPPORT_GFX_PG |
+ AMDGPU_PG_SUPPORT_GFX_SMG |
+ AMDGPU_PG_SUPPORT_GFX_DMG |*/
+ AMDGPU_PG_SUPPORT_UVD |
+ /*AMDGPU_PG_SUPPORT_VCE |
+ AMDGPU_PG_SUPPORT_CP |
+ AMDGPU_PG_SUPPORT_GDS |
+ AMDGPU_PG_SUPPORT_RLC_SMU_HS |
+ AMDGPU_PG_SUPPORT_ACP |
+ AMDGPU_PG_SUPPORT_SAMU |*/
+ 0;
+ if (adev->pdev->device == 0x1312 ||
+ adev->pdev->device == 0x1316 ||
+ adev->pdev->device == 0x1317)
+ adev->external_rev_id = 0x41;
+ else
+ adev->external_rev_id = 0x1;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->cg_flags =
+ AMDGPU_CG_SUPPORT_GFX_MGCG |
+ AMDGPU_CG_SUPPORT_GFX_MGLS |
+ /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+ AMDGPU_CG_SUPPORT_GFX_CGLS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS |
+ AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
+ AMDGPU_CG_SUPPORT_GFX_CP_LS |
+ AMDGPU_CG_SUPPORT_SDMA_MGCG |
+ AMDGPU_CG_SUPPORT_SDMA_LS |
+ AMDGPU_CG_SUPPORT_BIF_LS |
+ AMDGPU_CG_SUPPORT_VCE_MGCG |
+ AMDGPU_CG_SUPPORT_UVD_MGCG |
+ AMDGPU_CG_SUPPORT_HDP_LS |
+ AMDGPU_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags =
+ /*AMDGPU_PG_SUPPORT_GFX_PG |
+ AMDGPU_PG_SUPPORT_GFX_SMG | */
+ AMDGPU_PG_SUPPORT_UVD |
+ /*AMDGPU_PG_SUPPORT_VCE |
+ AMDGPU_PG_SUPPORT_CP |
+ AMDGPU_PG_SUPPORT_GDS |
+ AMDGPU_PG_SUPPORT_RLC_SMU_HS |
+ AMDGPU_PG_SUPPORT_SAMU |*/
+ 0;
+ if (adev->asic_type == CHIP_KABINI) {
+ if (adev->rev_id == 0)
+ adev->external_rev_id = 0x81;
+ else if (adev->rev_id == 1)
+ adev->external_rev_id = 0x82;
+ else if (adev->rev_id == 2)
+ adev->external_rev_id = 0x85;
+		} else {
+			adev->external_rev_id = adev->rev_id + 0xa1;
+		}
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cik_common_sw_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static int cik_common_sw_fini(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static int cik_common_hw_init(struct amdgpu_device *adev)
+{
+ /* move the golden regs per IP block */
+ cik_init_golden_registers(adev);
+ /* enable pcie gen2/3 link */
+ cik_pcie_gen3_enable(adev);
+ /* enable aspm */
+ cik_program_aspm(adev);
+
+ return 0;
+}
+
+static int cik_common_hw_fini(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static int cik_common_suspend(struct amdgpu_device *adev)
+{
+ return cik_common_hw_fini(adev);
+}
+
+static int cik_common_resume(struct amdgpu_device *adev)
+{
+ return cik_common_hw_init(adev);
+}
+
+static bool cik_common_is_idle(struct amdgpu_device *adev)
+{
+ return true;
+}
+
+static int cik_common_wait_for_idle(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void cik_common_print_status(struct amdgpu_device *adev)
+{
+
+}
+
+static int cik_common_soft_reset(struct amdgpu_device *adev)
+{
+ /* XXX hard reset?? */
+ return 0;
+}
+
+static int cik_common_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ return 0;
+}
+
+static int cik_common_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs cik_common_ip_funcs = {
+ .early_init = cik_common_early_init,
+ .late_init = NULL,
+ .sw_init = cik_common_sw_init,
+ .sw_fini = cik_common_sw_fini,
+ .hw_init = cik_common_hw_init,
+ .hw_fini = cik_common_hw_fini,
+ .suspend = cik_common_suspend,
+ .resume = cik_common_resume,
+ .is_idle = cik_common_is_idle,
+ .wait_for_idle = cik_common_wait_for_idle,
+ .soft_reset = cik_common_soft_reset,
+ .print_status = cik_common_print_status,
+ .set_clockgating_state = cik_common_set_clockgating_state,
+ .set_powergating_state = cik_common_set_powergating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
new file mode 100644
index 000000000000..967d630a4dcb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_H__
+#define __CIK_H__
+
+extern const struct amdgpu_ip_funcs cik_common_ip_funcs;
+
+void cik_srbm_select(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 queue, u32 vmid);
+int cik_set_ip_blocks(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
new file mode 100644
index 000000000000..35d8efdcde7e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_DPM_H__
+#define __CIK_DPM_H__
+
+extern const struct amdgpu_ip_funcs ci_dpm_ip_funcs;
+extern const struct amdgpu_ip_funcs kv_dpm_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
new file mode 100644
index 000000000000..81e8bbaba3e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "cikd.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written. When the
+ * pointers are equal, the ring is idle. When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr. When there is an interrupt, the host then starts
+ * fetching vectors and processing them until the pointers are
+ * equal again, at which point it updates the rptr.
+ */
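+
+/*
+ * Illustrative sketch (not part of this patch): how a host-side handler
+ * would consume the ring described above via the callbacks defined
+ * below.  The real loop lives in the common IH code; the dispatch
+ * helper named here is an assumption for illustration only.
+ */
+#if 0
+static void cik_ih_process_sketch(struct amdgpu_device *adev)
+{
+	struct amdgpu_iv_entry entry;
+	u32 wptr = cik_ih_get_wptr(adev); /* also handles overflow */
+
+	while (adev->irq.ih.rptr != wptr) {
+		cik_ih_decode_iv(adev, &entry); /* advances rptr by 16 bytes */
+		amdgpu_irq_dispatch(adev, &entry); /* assumed dispatch helper */
+	}
+	cik_ih_set_rptr(adev); /* publish the new rptr to the GPU */
+}
+#endif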
+
+static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * cik_ih_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the interrupt ring buffer (CIK).
+ */
+static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+ u32 ih_cntl = RREG32(mmIH_CNTL);
+ u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+ ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
+ ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmIH_CNTL, ih_cntl);
+ WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+ adev->irq.ih.enabled = true;
+}
+
+/**
+ * cik_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (CIK).
+ */
+static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+ u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+ u32 ih_cntl = RREG32(mmIH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
+ ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
+ WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+ WREG32(mmIH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(mmIH_RB_RPTR, 0);
+ WREG32(mmIH_RB_WPTR, 0);
+ adev->irq.ih.enabled = false;
+ adev->irq.ih.rptr = 0;
+}
+
+/**
+ * cik_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * disable interrupts, then set up and enable the IH
+ * ring buffer (CIK).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int cik_ih_irq_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+ u64 wptr_off;
+
+ /* disable irqs */
+ cik_ih_disable_interrupts(adev);
+
+ /* setup interrupt control */
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+ /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+ * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
+ /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
+ WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+ rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+ ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
+ IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
+ (rb_bufsz << 1));
+
+ ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;
+
+ /* set the writeback address whether it's enabled or not */
+ wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+ WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(mmIH_RB_RPTR, 0);
+ WREG32(mmIH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
+ (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
+ (0 << IH_CNTL__MC_VMID__SHIFT);
+ /* IH_CNTL__RPTR_REARM_MASK only works if msi's are enabled */
+ if (adev->irq.msi_enabled)
+ ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
+ WREG32(mmIH_CNTL, ih_cntl);
+
+ pci_set_master(adev->pdev);
+
+ /* enable irqs */
+ cik_ih_enable_interrupts(adev);
+
+ return ret;
+}
+
+/**
+ * cik_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (CIK).
+ */
+static void cik_ih_irq_disable(struct amdgpu_device *adev)
+{
+ cik_ih_disable_interrupts(adev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * cik_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (CIK). Also check for
+ * ring buffer overflow and deal with it.
+ * Used by cik_irq_process().
+ * Returns the value of the wptr.
+ */
+static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
+{
+ u32 wptr, tmp;
+
+ wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+ if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+ wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last non-overwritten vector (wptr + 16). Hopefully
+		 * this allows us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+ adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(mmIH_RB_CNTL, tmp);
+ }
+ return (wptr & adev->irq.ih.ptr_mask);
+}
+
+/* CIK IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [63:60] - reserved
+ * [71:64] - RINGID
+ * CP:
+ * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
+ * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
+ * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
+ * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
+ * PIPE_ID - ME0 0=3D
+ * - ME1&2 compute dispatcher (4 pipes each)
+ * SDMA:
+ * INSTANCE_ID [1:0], QUEUE_ID[1:0]
+ * INSTANCE_ID - 0 = sdma0, 1 = sdma1
+ * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
+ * [79:72] - VMID
+ * [95:80] - PASID
+ * [127:96] - reserved
+ */
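+
+/*
+ * Illustrative sketch (not part of this patch): splitting the SDMA
+ * RINGID subfields described above.  The exact bit positions are an
+ * assumption derived from the layout comment; the helper itself is
+ * hypothetical and unused here.
+ */
+#if 0
+static void cik_ih_split_sdma_ringid_sketch(u32 ring_id, u32 *instance_id,
+					    u32 *queue_id)
+{
+	*instance_id = (ring_id >> 0) & 0x3;	/* INSTANCE_ID [1:0] */
+	*queue_id = (ring_id >> 2) & 0x3;	/* QUEUE_ID    [1:0] */
+}
+#endif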
+
+/**
+ * cik_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: IV entry to place the decoded data into
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void cik_ih_decode_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ /* wptr/rptr are in bytes! */
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ uint32_t dw[4];
+
+ dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+ entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+ /* wptr/rptr are in bytes! */
+ adev->irq.ih.rptr += 16;
+}
+
+/**
+ * cik_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void cik_ih_set_rptr(struct amdgpu_device *adev)
+{
+ WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int cik_ih_early_init(struct amdgpu_device *adev)
+{
+ cik_ih_set_interrupt_funcs(adev);
+
+ return 0;
+}
+
+static int cik_ih_sw_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int cik_ih_sw_fini(struct amdgpu_device *adev)
+{
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev);
+
+ return 0;
+}
+
+static int cik_ih_hw_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = cik_ih_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int cik_ih_hw_fini(struct amdgpu_device *adev)
+{
+ cik_ih_irq_disable(adev);
+
+ return 0;
+}
+
+static int cik_ih_suspend(struct amdgpu_device *adev)
+{
+ return cik_ih_hw_fini(adev);
+}
+
+static int cik_ih_resume(struct amdgpu_device *adev)
+{
+ return cik_ih_hw_init(adev);
+}
+
+static bool cik_ih_is_idle(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ return false;
+
+ return true;
+}
+
+static int cik_ih_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+		/* read SRBM_STATUS */
+ tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static void cik_ih_print_status(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, "CIK IH registers\n");
+ dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(mmSRBM_STATUS));
+ dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
+ RREG32(mmSRBM_STATUS2));
+ dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
+ RREG32(mmINTERRUPT_CNTL));
+ dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
+ RREG32(mmINTERRUPT_CNTL2));
+ dev_info(adev->dev, " IH_CNTL=0x%08X\n",
+ RREG32(mmIH_CNTL));
+ dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
+ RREG32(mmIH_RB_CNTL));
+ dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
+ RREG32(mmIH_RB_BASE));
+ dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
+ RREG32(mmIH_RB_WPTR_ADDR_LO));
+ dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
+ RREG32(mmIH_RB_WPTR_ADDR_HI));
+ dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
+ RREG32(mmIH_RB_RPTR));
+ dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
+ RREG32(mmIH_RB_WPTR));
+}
+
+static int cik_ih_soft_reset(struct amdgpu_device *adev)
+{
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+ if (srbm_soft_reset) {
+ cik_ih_print_status(adev);
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ cik_ih_print_status(adev);
+ }
+
+ return 0;
+}
+
+static int cik_ih_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ return 0;
+}
+
+static int cik_ih_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs cik_ih_ip_funcs = {
+ .early_init = cik_ih_early_init,
+ .late_init = NULL,
+ .sw_init = cik_ih_sw_init,
+ .sw_fini = cik_ih_sw_fini,
+ .hw_init = cik_ih_hw_init,
+ .hw_fini = cik_ih_hw_fini,
+ .suspend = cik_ih_suspend,
+ .resume = cik_ih_resume,
+ .is_idle = cik_ih_is_idle,
+ .wait_for_idle = cik_ih_wait_for_idle,
+ .soft_reset = cik_ih_soft_reset,
+ .print_status = cik_ih_print_status,
+ .set_clockgating_state = cik_ih_set_clockgating_state,
+ .set_powergating_state = cik_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs cik_ih_funcs = {
+ .get_wptr = cik_ih_get_wptr,
+ .decode_iv = cik_ih_decode_iv,
+ .set_rptr = cik_ih_set_rptr
+};
+
+static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ if (adev->irq.ih_funcs == NULL)
+ adev->irq.ih_funcs = &cik_ih_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
new file mode 100644
index 000000000000..f70162525034
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_IH_H__
+#define __CIK_IH_H__
+
+extern const struct amdgpu_ip_funcs cik_ih_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
new file mode 100644
index 000000000000..ae2bb26fa46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -0,0 +1,1422 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+#include "cikd.h"
+#include "cik.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+ SDMA0_REGISTER_OFFSET,
+ SDMA1_REGISTER_OFFSET
+};
+
+static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
+static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
+static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
+static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines. These engines are used for compute
+ * and gfx. There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things. It also has support for tiling/detiling of
+ * buffers.
+ */
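+
+/*
+ * Illustrative sketch (not part of this patch): the 32-bit sDMA packet
+ * header built by SDMA_PACKET(op, sub_op, extra) packs, assuming the
+ * usual CIK cikd.h encoding, extra in [31:16], sub_op in [15:8] and op
+ * in [7:0].  A single-dword linear write then looks like this:
+ */
+#if 0
+static void cik_sdma_emit_write_one_dw_sketch(struct amdgpu_ring *ring,
+					      u64 gpu_addr, u32 value)
+{
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE,
+					    SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+	amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); /* dst addr lo */
+	amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); /* dst addr hi */
+	amdgpu_ring_write(ring, 1);	/* number of DWs to follow */
+	amdgpu_ring_write(ring, value);	/* payload */
+}
+#endif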
+
+/**
+ * cik_sdma_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int cik_sdma_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err, i;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ chip_name = "bonaire";
+ break;
+ case CHIP_HAWAII:
+ chip_name = "hawaii";
+ break;
+ case CHIP_KAVERI:
+ chip_name = "kaveri";
+ break;
+ case CHIP_KABINI:
+ chip_name = "kabini";
+ break;
+ case CHIP_MULLINS:
+ chip_name = "mullins";
+ break;
+	default:
+		BUG();
+ }
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ if (i == 0)
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+ err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		if (err)
+			goto out;
+	}
+out:
+ if (err) {
+ printk(KERN_ERR
+ "cik_sdma: Failed to load firmware \"%s\"\n",
+ fw_name);
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ release_firmware(adev->sdma[i].fw);
+ adev->sdma[i].fw = NULL;
+ }
+ }
+ return err;
+}
+
+/**
+ * cik_sdma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ u32 rptr;
+
+ rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+ return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+
+ return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *ring);
+
+/**
+ * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (CIK).
+ */
+static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+ u32 next_rptr = ring->wptr + 5;
+
+ if (ib->flush_hdp_writefifo)
+ next_rptr += 6;
+
+ while ((next_rptr & 7) != 4)
+ next_rptr++;
+
+ next_rptr += 4;
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, 1); /* number of DWs to follow */
+ amdgpu_ring_write(ring, next_rptr);
+
+ if (ib->flush_hdp_writefifo) {
+ /* flush HDP */
+ cik_sdma_hdp_flush_ring_emit(ring);
+ }
+
+	/* IB packet must end on an 8 DW boundary */
+ while ((ring->wptr & 7) != 4)
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
+ amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+{
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (ring == &ring->adev->sdma[0].ring)
+ ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
+ else
+ ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
+ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
+ * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address to write the fence seq number to
+ * @seq: fence sequence number
+ * @write64bit: also write the upper 32 bits of the seq number
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number, and a DMA trap packet to generate
+ * an interrupt if needed (CIK).
+ */
+static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ bool write64bit)
+{
+ /* write the fence */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ /* optionally write high bits as well */
+ if (write64bit) {
+ addr += 4;
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ }
+
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
+}
+
+/**
+ * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (CIK).
+ */
+static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
+ struct amdgpu_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
+ amdgpu_ring_write(ring, addr & 0xfffffff8);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+ return true;
+}
+
+/**
+ * cik_sdma_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers (CIK).
+ */
+static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
+ struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+ u32 rb_cntl;
+ int i;
+
+ if ((adev->mman.buffer_funcs_ring == sdma0) ||
+ (adev->mman.buffer_funcs_ring == sdma1))
+ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
+ }
+ sdma0->ready = false;
+ sdma1->ready = false;
+}
+
+/**
+ * cik_sdma_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues (CIK).
+ */
+static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+}
+
+/**
+ * cik_sdma_enable - halt or unhalt the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 me_cntl;
+ int i;
+
+	if (!enable) {
+ cik_sdma_gfx_stop(adev);
+ cik_sdma_rlc_stop(adev);
+ }
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
+ if (enable)
+ me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
+ else
+ me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
+ }
+}
+
+/**
+ * cik_sdma_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 rb_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 wb_offset;
+ int i, j, r;
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ ring = &adev->sdma[i].ring;
+ wb_offset = (ring->rptr_offs * 4);
+
+ mutex_lock(&adev->srbm_mutex);
+ for (j = 0; j < 16; j++) {
+ cik_srbm_select(adev, 0, 0, 0, j);
+ /* SDMA GFX */
+ WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
+ /* XXX SDMA RLC - todo */
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
+ ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
+
+ WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+
+ /* enable DMA RB */
+ WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
+ rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);
+
+ ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
+#endif
+ /* enable DMA IBs */
+ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+ ring->ready = true;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ }
+
+ return 0;
+}
+
+/**
+ * cik_sdma_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+ return 0;
+}
+
+/**
+ * cik_sdma_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_sdma_load_microcode(struct amdgpu_device *adev)
+{
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+ int i, j;
+
+ if (!adev->sdma[0].fw || !adev->sdma[1].fw)
+ return -EINVAL;
+
+ /* halt the MEs */
+ cik_sdma_enable(adev, false);
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ amdgpu_ucode_print_sdma_hdr(&hdr->header);
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ fw_data = (const __le32 *)
+ (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+ for (j = 0; j < fw_size; j++)
+ WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+ WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+ }
+
+ return 0;
+}
+
+/**
+ * cik_sdma_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_start(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = cik_sdma_load_microcode(adev);
+ if (r)
+ return r;
+
+ /* unhalt the MEs */
+ cik_sdma_enable(adev, true);
+
+ /* start the gfx rings and rlc compute queues */
+ r = cik_sdma_gfx_resume(adev);
+ if (r)
+ return r;
+ r = cik_sdma_rlc_resume(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * cik_sdma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp;
+ u64 gpu_addr;
+
+ r = amdgpu_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ring_lock(ring, 5);
+ if (r) {
+ DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_wb_free(adev, index);
+ return r;
+ }
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, 1); /* number of DWs to follow */
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_unlock_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_wb_free(adev, index);
+
+ return r;
+}
+
+/**
+ * cik_sdma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (CIK).
+ * Returns 0 on success, error on failure.
+ */
+static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp = 0;
+ u64 gpu_addr;
+
+ r = amdgpu_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ if (r) {
+ amdgpu_wb_free(adev, index);
+ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr);
+ ib.ptr[3] = 1;
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ amdgpu_ib_free(adev, &ib);
+ amdgpu_wb_free(adev, index);
+ DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = amdgpu_fence_wait(ib.fence, false);
+ if (r) {
+ amdgpu_ib_free(adev, &ib);
+ amdgpu_wb_free(adev, index);
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+ ib.fence->ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ amdgpu_ib_free(adev, &ib);
+ amdgpu_wb_free(adev, index);
+ return r;
+}
+
+/**
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & AMDGPU_PTE_SYSTEM) {
+ value = amdgpu_vm_map_gart(ib->ring->adev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & AMDGPU_PTE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+}
+
+/**
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & AMDGPU_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs so that it ends on an 8-DW boundary,
+ * as the sDMA engine requires.
+ */
+static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
+{
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
+
+/**
+ * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm: amdgpu_vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (CIK).
+ */
+static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ if (vm_id < 8) {
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ } else {
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ }
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* update SH_MEM_* regs */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+ amdgpu_ring_write(ring, VMID(vm_id));
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmSH_MEM_BASES);
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmSH_MEM_CONFIG);
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE);
+ amdgpu_ring_write(ring, 1);
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT);
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+ amdgpu_ring_write(ring, VMID(0));
+
+ /* flush TLB */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 1 << vm_id);
+
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0); /* reference */
+ amdgpu_ring_write(ring, 0); /* mask */
+ amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
+ WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
+ WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
+ } else {
+ orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
+ orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ } else {
+ orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static int cik_sdma_early_init(struct amdgpu_device *adev)
+{
+ cik_sdma_set_ring_funcs(adev);
+ cik_sdma_set_irq_funcs(adev);
+ cik_sdma_set_buffer_funcs(adev);
+ cik_sdma_set_vm_pte_funcs(adev);
+
+ return 0;
+}
+
+static int cik_sdma_sw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ r = cik_sdma_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* SDMA trap event */
+ r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+ r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+ r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+ if (r)
+ return r;
+
+	ring = &adev->sdma[0].ring;
+	ring->ring_obj = NULL;
+	sprintf(ring->name, "sdma0");
+ r = amdgpu_ring_init(adev, ring, 256 * 1024,
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+ &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
+ AMDGPU_RING_TYPE_SDMA);
+ if (r)
+ return r;
+
+ ring = &adev->sdma[1].ring;
+ ring->ring_obj = NULL;
+ sprintf(ring->name, "sdma1");
+ r = amdgpu_ring_init(adev, ring, 256 * 1024,
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+ &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
+ AMDGPU_RING_TYPE_SDMA);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int cik_sdma_sw_fini(struct amdgpu_device *adev)
+{
+ amdgpu_ring_fini(&adev->sdma[0].ring);
+ amdgpu_ring_fini(&adev->sdma[1].ring);
+
+ return 0;
+}
+
+static int cik_sdma_hw_init(struct amdgpu_device *adev)
+{
+ return cik_sdma_start(adev);
+}
+
+static int cik_sdma_hw_fini(struct amdgpu_device *adev)
+{
+ cik_sdma_enable(adev, false);
+
+ return 0;
+}
+
+static int cik_sdma_suspend(struct amdgpu_device *adev)
+{
+ return cik_sdma_hw_fini(adev);
+}
+
+static int cik_sdma_resume(struct amdgpu_device *adev)
+{
+ return cik_sdma_hw_init(adev);
+}
+
+static bool cik_sdma_is_idle(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(mmSRBM_STATUS2);
+
+ if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+ SRBM_STATUS2__SDMA1_BUSY_MASK))
+ return false;
+
+ return true;
+}
+
+static int cik_sdma_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
+ SRBM_STATUS2__SDMA1_BUSY_MASK);
+
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static void cik_sdma_print_status(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ dev_info(adev->dev, "CIK SDMA registers\n");
+ dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
+ RREG32(mmSRBM_STATUS2));
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
+ i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+ mutex_lock(&adev->srbm_mutex);
+ for (j = 0; j < 16; j++) {
+ cik_srbm_select(adev, 0, 0, 0, j);
+ dev_info(adev->dev, " VM %d:\n", j);
+ dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n",
+ i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+}
+
+static int cik_sdma_soft_reset(struct amdgpu_device *adev)
+{
+ u32 srbm_soft_reset = 0;
+ u32 status = RREG32(mmSRBM_STATUS2);
+ u32 tmp;
+
+ if (status & SRBM_STATUS2__SDMA_BUSY_MASK) {
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+ }
+ if (status & SRBM_STATUS2__SDMA1_BUSY_MASK) {
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+ }
+
+ if (srbm_soft_reset) {
+ cik_sdma_print_status(adev);
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
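+ /* the read back is assumed to post the write before the delay */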
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ cik_sdma_print_status(adev);
+ }
+
+ return 0;
+}
+
+static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_cntl;
+
+ switch (type) {
+ case AMDGPU_SDMA_IRQ_TRAP0:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+ sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+ break;
+ default:
+ break;
+ }
+ break;
+ case AMDGPU_SDMA_IRQ_TRAP1:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+ sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 instance_id, queue_id;
+
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
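+ /* ring_id layout assumed: bits [1:0] = SDMA instance, [3:2] = queue */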
+ DRM_DEBUG("IH: SDMA trap\n");
+ switch (instance_id) {
+ case 0:
+ switch (queue_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma[0].ring);
+ break;
+ case 1:
+ /* XXX compute */
+ break;
+ case 2:
+ /* XXX compute */
+ break;
+ }
+ break;
+ case 1:
+ switch (queue_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma[1].ring);
+ break;
+ case 1:
+ /* XXX compute */
+ break;
+ case 2:
+ /* XXX compute */
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in SDMA command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int cik_sdma_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ bool gate = false;
+
+ if (state == AMDGPU_CG_STATE_GATE)
+ gate = true;
+
+ cik_enable_sdma_mgcg(adev, gate);
+ cik_enable_sdma_mgls(adev, gate);
+
+ return 0;
+}
+
+static int cik_sdma_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs cik_sdma_ip_funcs = {
+ .early_init = cik_sdma_early_init,
+ .late_init = NULL,
+ .sw_init = cik_sdma_sw_init,
+ .sw_fini = cik_sdma_sw_fini,
+ .hw_init = cik_sdma_hw_init,
+ .hw_fini = cik_sdma_hw_fini,
+ .suspend = cik_sdma_suspend,
+ .resume = cik_sdma_resume,
+ .is_idle = cik_sdma_is_idle,
+ .wait_for_idle = cik_sdma_wait_for_idle,
+ .soft_reset = cik_sdma_soft_reset,
+ .print_status = cik_sdma_print_status,
+ .set_clockgating_state = cik_sdma_set_clockgating_state,
+ .set_powergating_state = cik_sdma_set_powergating_state,
+};
+
+/**
+ * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (CIK).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
+{
+ if (cik_sdma_is_idle(ring->adev)) {
+ amdgpu_ring_lockup_update(ring);
+ return false;
+ }
+ return amdgpu_ring_test_lockup(ring);
+}
+
+static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+ .get_rptr = cik_sdma_ring_get_rptr,
+ .get_wptr = cik_sdma_ring_get_wptr,
+ .set_wptr = cik_sdma_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = cik_sdma_ring_emit_ib,
+ .emit_fence = cik_sdma_ring_emit_fence,
+ .emit_semaphore = cik_sdma_ring_emit_semaphore,
+ .emit_vm_flush = cik_sdma_ring_emit_vm_flush,
+ .test_ring = cik_sdma_ring_test_ring,
+ .test_ib = cik_sdma_ring_test_ib,
+ .is_lockup = cik_sdma_ring_is_lockup,
+};
+
+static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
+ adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
+ .set = cik_sdma_set_trap_irq_state,
+ .process = cik_sdma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
+ .process = cik_sdma_process_illegal_inst_irq,
+};
+
+static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+ adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+}
+
+/**
+ * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (CIK).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
+ amdgpu_ring_write(ring, byte_count);
+ amdgpu_ring_write(ring, 0); /* src/dst endian swap */
+ amdgpu_ring_write(ring, lower_32_bits(src_offset));
+ amdgpu_ring_write(ring, upper_32_bits(src_offset));
+ amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+ amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+}
+
+/**
+ * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (CIK).
+ */
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+ uint32_t src_data,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
+ amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+ amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+ amdgpu_ring_write(ring, src_data);
+ amdgpu_ring_write(ring, byte_count);
+}
+
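+/* the limits below mirror the packet layouts above: a copy packet is 7
+ * dwords and a fill packet 5; the hardware byte count field is assumed
+ * to be 21 bits wide, hence the 0x1fffff (2 MiB - 1) cap
+ */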
+static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
+ .copy_max_bytes = 0x1fffff,
+ .copy_num_dw = 7,
+ .emit_copy_buffer = cik_sdma_emit_copy_buffer,
+
+ .fill_max_bytes = 0x1fffff,
+ .fill_num_dw = 5,
+ .emit_fill_buffer = cik_sdma_emit_fill_buffer,
+};
+
+static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
+{
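+ /* register the SDMA as the default buffer mover unless another IP
+ * block has already claimed the role
+ */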
+ if (adev->mman.buffer_funcs == NULL) {
+ adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+ }
+}
+
+static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+ .copy_pte = cik_sdma_vm_copy_pte,
+ .write_pte = cik_sdma_vm_write_pte,
+ .set_pte_pde = cik_sdma_vm_set_pte_pde,
+ .pad_ib = cik_sdma_vm_pad_ib,
+};
+
+static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+ if (adev->vm_manager.vm_pte_funcs == NULL) {
+ adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+ adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
new file mode 100644
index 000000000000..42b59960bc53
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_SDMA_H__
+#define __CIK_SDMA_H__
+
+extern const struct amdgpu_ip_funcs cik_sdma_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_ci.h b/drivers/gpu/drm/amd/amdgpu/clearstate_ci.h
new file mode 100644
index 000000000000..c3982f9475fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_ci.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int ci_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0x00000000, // TA_BC_BASE_ADDR_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_HI_0
+ 0x00000000, // COHER_DEST_BASE_HI_1
+ 0x00000000, // COHER_DEST_BASE_HI_2
+ 0x00000000, // COHER_DEST_BASE_HI_3
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int ci_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+ 0, // HOLE
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0, // HOLE
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x00000000, // DB_STENCILREFMASK
+ 0x00000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int ci_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+ 0x00000000, // VGT_DMA_BASE_HI
+ 0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int ci_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0x00000000, // VGT_GS_ONCHIP_CNTL
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const unsigned int ci_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // WD_ENHANCE
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int ci_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int ci_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000010, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
+{
+ {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+ {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+ {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+ {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+ {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+ {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+ { 0, 0, 0 }
+};
+static const struct cs_section_def ci_cs_data[] = {
+ { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
new file mode 100644
index 000000000000..c1bc6935c88e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -0,0 +1,3830 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "cikd.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "gca/gfx_7_2_enum.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET,
+ (0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+ uint32_t reg;
+ uint32_t vblank;
+ uint32_t vline;
+ uint32_t hpd;
+} interrupt_status_offsets[6] = { {
+ .reg = mmDISP_INTERRUPT_STATUS,
+ .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+ mmDC_HPD1_INT_CONTROL,
+ mmDC_HPD2_INT_CONTROL,
+ mmDC_HPD3_INT_CONTROL,
+ mmDC_HPD4_INT_CONTROL,
+ mmDC_HPD5_INT_CONTROL,
+ mmDC_HPD6_INT_CONTROL,
+};
+
+static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+ WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
+ spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+
+ return r;
+}
+
+static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+ WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+ spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+}
+
+static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+ if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
+ CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v8_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+ unsigned i = 0;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return;
+
+ if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce_v8_0_is_in_vblank(adev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce_v8_0_is_counter_moving(adev, crtc))
+ break;
+ }
+ }
+
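+ /* then wait for the start of the next vblank */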
+ while (!dce_v8_0_is_in_vblank(adev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce_v8_0_is_counter_moving(adev, crtc))
+ break;
+ }
+ }
+}
+
+static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else
+ return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+/**
+ * dce_v8_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; when it does, we release the lock and allow the
+ * double buffered update to take place.
+ */
+static void dce_v8_0_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base)
+{
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
+ int i;
+
+ /* Lock the graphics update lock */
+ tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
+ WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
+ GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking GRPH_UPDATE_LOCK.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
+ WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+
+ *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+ *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ return 0;
+}
+
+/**
+ * dce_v8_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case AMDGPU_HPD_1:
+ if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
+ break;
+ case AMDGPU_HPD_2:
+ if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
+ connected = true;
+ break;
+ case AMDGPU_HPD_3:
+ if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
+ connected = true;
+ break;
+ case AMDGPU_HPD_4:
+ if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
+ connected = true;
+ break;
+ case AMDGPU_HPD_5:
+ if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
+ connected = true;
+ break;
+ case AMDGPU_HPD_6:
+ if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
+ return connected;
+}
+
+/**
+ * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = dce_v8_0_hpd_sense(adev, hpd);
+
+ switch (hpd) {
+ case AMDGPU_HPD_1:
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_2:
+ tmp = RREG32(mmDC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD2_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_3:
+ tmp = RREG32(mmDC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD3_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_4:
+ tmp = RREG32(mmDC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD4_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_5:
+ tmp = RREG32(mmDC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD5_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_6:
+ tmp = RREG32(mmDC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * dce_v8_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
+ (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
+ DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
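+ /* the timer periods (0x9c4 = 2500, 0xfa = 250) are assumed to match
+ * the connection and rx interrupt debounce values used by radeon
+ */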
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS; this avoids breaking the
+ * aux dp channel on iMacs and helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * it also avoids interrupt storms during dpms.
+ */
+ continue;
+ }
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(mmDC_HPD1_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_2:
+ WREG32(mmDC_HPD2_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_3:
+ WREG32(mmDC_HPD3_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_4:
+ WREG32(mmDC_HPD4_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_5:
+ WREG32(mmDC_HPD5_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_6:
+ WREG32(mmDC_HPD6_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+ amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+}
+
+/**
+ * dce_v8_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(mmDC_HPD1_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_2:
+ WREG32(mmDC_HPD2_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_3:
+ WREG32(mmDC_HPD3_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_4:
+ WREG32(mmDC_HPD4_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_5:
+ WREG32(mmDC_HPD5_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_6:
+ WREG32(mmDC_HPD6_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+ amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+}
+
+static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return mmDC_GPIO_HPD_A;
+}
+
+static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
+
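+ /* snapshot the HV counter of each enabled crtc; if a counter is still
+ * unchanged after 10 reads spaced 100us apart, report the display hung
+ */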
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
+ crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
+static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 crtc_enabled, tmp;
+ int i;
+
+ save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+
+ /* blank the display controllers */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+#if 0
+ u32 frame_count;
+ int j;
+
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
+ amdgpu_display_vblank_wait(adev, i);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+ WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ /* wait for the next frame */
+ frame_count = amdgpu_display_vblank_get_counter(adev, i);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
+ tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+ WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
+ tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
+ WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+#else
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+#endif
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+}
+
+static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(adev->mc.vram_start));
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(adev->mc.vram_start));
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)adev->mc.vram_start);
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)adev->mc.vram_start);
+
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
+ tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+ WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
+ tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+ WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
+ tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
+ WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ /* wait for the next frame */
+ frame_count = amdgpu_display_vblank_get_counter(adev, i);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
+ WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+ WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
+
+ /* Unlock vga access */
+ WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ u32 tmp;
+
+ /* Lockout access through VGA aperture*/
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ if (render)
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
+ else
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ if (render)
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
+ else
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+}
+
+static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ bpc = amdgpu_connector_get_monitor_bpc(connector);
+ dither = amdgpu_connector->dither;
+ }
+
+ /* LVDS/eDP FMT is set up by atom */
+ if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
+ (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
+ else
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
+ (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
+ break;
+ case 8:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
+ (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
+ else
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
+ (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
+ break;
+ case 10:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
+ (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
+ else
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
+ (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
+ break;
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/* display watermark setup */
+/**
+ * dce_v8_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (CIK).
+ * Returns the line buffer size in pixels.
+ */
+static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
+ struct amdgpu_crtc *amdgpu_crtc,
+ struct drm_display_mode *mode)
+{
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+ /*
+ * Line Buffer Setup
+ * There are 6 line buffers, one for each display controller.
+ * There are 3 partitions per LB. Select the number of partitions
+ * to enable based on the display width. For display widths larger
+ * than 4096, you need to use 2 display controllers and combine
+ * them using the stereo blender.
+ */
+ if (amdgpu_crtc->base.enabled && mode) {
+ if (mode->crtc_hdisplay < 1920) {
+ tmp = 1;
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 2560) {
+ tmp = 2;
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 4096) {
+ tmp = 0;
+ buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ } else {
+ DRM_DEBUG_KMS("Mode too big for LB!\n");
+ tmp = 0;
+ buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ }
+ } else {
+ tmp = 1;
+ buffer_alloc = 0;
+ }
+
+ WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
+ (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
+ (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
+
+ WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
+ break;
+ udelay(1);
+ }
+
+ if (amdgpu_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ default:
+ return 4096 * 2;
+ case 1:
+ return 1920 * 2;
+ case 2:
+ return 2560 * 2;
+ }
+ }
+
+ /* controller not enabled, so no lb used */
+ return 0;
+}
+
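+/* Illustrative numbers for the bucketing above: for a single 1920x1080
+ * head, crtc_hdisplay (1920) lands in the "< 2560" bucket, so two LB
+ * partitions are enabled (tmp = 2), buffer_alloc = 2, and 2560 * 2 = 5120
+ * pixels of line buffer are reported back to the watermark code.
+ */
+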
+/**
+ * cik_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(mmMC_SHARED_CHMAP);
+
+ switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ case 4:
+ return 3;
+ case 5:
+ return 6;
+ case 6:
+ return 10;
+ case 7:
+ return 12;
+ case 8:
+ return 16;
+ }
+}
+
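+/* Note: the watermark math below multiplies dram_channels by 4 bytes, i.e.
+ * it assumes 32-bit channels, so e.g. the 8-channel encoding corresponds
+ * to a 256-bit memory interface.
+ */
+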
+struct dce8_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+/**
+ * dce_v8_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
+{
+ /* Calculate raw DRAM Bandwidth */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
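+/* Rough example of the fixed-point math above: with yclk = 1000000 (kHz,
+ * i.e. 1 GHz effective) and 8 channels, this works out to roughly
+ * 1000 MHz * (8 * 4) bytes * 0.7 efficiency ~= 22400 MBytes/s.
+ */
+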
+/**
+ * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v8_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
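+/* Example: at sclk = 800000 (kHz, i.e. 800 MHz) the return path is good
+ * for roughly 800 MHz * 32 bytes * 0.8 efficiency ~= 20480 MBytes/s.
+ */
+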
+/**
+ * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, bandwidth;
+ fixed20_12 a, b;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(32);
+ b.full = dfixed_mul(a, disp_clk);
+
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+ bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v8_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
+{
+ /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+ u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
+ u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v8_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
+
+ return dfixed_trunc(bandwidth);
+}
+
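+/* Example: a 1080p mode with a 6 ns pixel period has a line time of about
+ * 2200 * 6 = 13200 ns (13.2 us); with 4 bytes per pixel and vsc = 1 the
+ * average bandwidth is then roughly 1920 * 4 / 13.2 ~= 580 MBytes/s.
+ */
+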
+/**
+ * dce_v8_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
+{
+ /* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ u32 tmp, dmif_size = 12288;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(mc_latency + 512);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(b, c);
+
+ c.full = dfixed_const(dmif_size);
+ b.full = dfixed_div(c, b);
+
+ tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+
+}
+
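+/* Rough magnitudes for the terms above (one 1080p head, disp_clk of
+ * 148500 kHz, ~20000 MBytes/s available): dc_latency is
+ * 40000000 / 148500 ~= 269 ns, and a worst-case 4 KiB chunk takes
+ * 512 * 8 * 1000 / 20000 ~= 204 ns, so the base latency comes out at a
+ * few microseconds before any line fill time correction.
+ */
+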
+/**
+ * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
+{
+ if (dce_v8_0_average_bandwidth(wm) <=
+ (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
+{
+ if (dce_v8_0_average_bandwidth(wm) <=
+ (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v8_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+ if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
+ return true;
+ else
+ return false;
+}
+
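+/* Example: with lb_size = 5120 and src_width = 1920, lb_partitions = 2;
+ * for vtaps = 1 that is <= vtaps + 1, so only one latency tolerant line
+ * is assumed and latency_hiding = line_time + blank_time.
+ */
+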
+/**
+ * dce_v8_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
+ struct amdgpu_crtc *amdgpu_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+ struct dce8_wm_params wm_low, wm_high;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 tmp, wm_mask;
+
+ if (amdgpu_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+
+ /* watermark for high clocks */
+ if (adev->pm.dpm_enabled) {
+ wm_high.yclk =
+ amdgpu_dpm_get_mclk(adev, false) * 10;
+ wm_high.sclk =
+ amdgpu_dpm_get_sclk(adev, false) * 10;
+ } else {
+ wm_high.yclk = adev->pm.current_mclk * 10;
+ wm_high.sclk = adev->pm.current_sclk * 10;
+ }
+
+ wm_high.disp_clk = mode->clock;
+ wm_high.src_width = mode->crtc_hdisplay;
+ wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.blank_time = line_time - wm_high.active_time;
+ wm_high.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_high.interlaced = true;
+ wm_high.vsc = amdgpu_crtc->vsc;
+ wm_high.vtaps = 1;
+ if (amdgpu_crtc->rmx_type != RMX_OFF)
+ wm_high.vtaps = 2;
+ wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_high.lb_size = lb_size;
+ wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
+ wm_high.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+ !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+ !dce_v8_0_check_latency_hiding(&wm_high) ||
+ (adev->mode_info.disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ }
+
+ /* watermark for low clocks */
+ if (adev->pm.dpm_enabled) {
+ wm_low.yclk =
+ amdgpu_dpm_get_mclk(adev, true) * 10;
+ wm_low.sclk =
+ amdgpu_dpm_get_sclk(adev, true) * 10;
+ } else {
+ wm_low.yclk = adev->pm.current_mclk * 10;
+ wm_low.sclk = adev->pm.current_sclk * 10;
+ }
+
+ wm_low.disp_clk = mode->clock;
+ wm_low.src_width = mode->crtc_hdisplay;
+ wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.blank_time = line_time - wm_low.active_time;
+ wm_low.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_low.interlaced = true;
+ wm_low.vsc = amdgpu_crtc->vsc;
+ wm_low.vtaps = 1;
+ if (amdgpu_crtc->rmx_type != RMX_OFF)
+ wm_low.vtaps = 2;
+ wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_low.lb_size = lb_size;
+ wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
+ wm_low.num_heads = num_heads;
+
+ /* set for low clocks */
+ latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+ !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+ !dce_v8_0_check_latency_hiding(&wm_low) ||
+ (adev->mode_info.disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ }
+ }
+
+ /* select wm A */
+ wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = wm_mask;
+ tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
+ WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+ (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
+ /* select wm B */
+ tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
+ WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+ (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
+ /* restore original selection */
+ WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
+
+ /* save values for DPM */
+ amdgpu_crtc->line_time = line_time;
+ amdgpu_crtc->wm_high = latency_watermark_a;
+ amdgpu_crtc->wm_low = latency_watermark_b;
+}
+
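+/* For reference, the timing terms above for a 148500 kHz (148.5 MHz)
+ * pixel clock with crtc_htotal = 2200: pixel_period = 1000000 / 148500
+ * = 6 ns (integer division) and line_time = 2200 * 6 = 13200 ns, well
+ * under the 65535 clamp.
+ */
+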
+/**
+ * dce_v8_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
+{
+ struct drm_display_mode *mode = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ amdgpu_update_display_priority(adev);
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ mode = &adev->mode_info.crtcs[i]->base.mode;
+ lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
+ dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
+ lb_size, num_heads);
+ }
+}
+
+static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+ int i;
+ u32 offset, tmp;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ offset = adev->mode_info.audio.pin[i].offset;
+ tmp = RREG32_AUDIO_ENDPT(offset,
+ ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+ if (((tmp &
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
+ adev->mode_info.audio.pin[i].connected = false;
+ else
+ adev->mode_info.audio.pin[i].connected = true;
+ }
+}
+
+static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
+{
+ int i;
+
+ dce_v8_0_audio_get_connected_pins(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ if (adev->mode_info.audio.pin[i].connected)
+ return &adev->mode_info.audio.pin[i];
+ }
+ DRM_ERROR("No connected audio pins found!\n");
+ return NULL;
+}
+
+static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->offset;
+
+ WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
+ (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
+}
+
+static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ u32 tmp = 0, offset;
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (connector->latency_present[1])
+ tmp =
+ (connector->video_latency[1] <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
+ (connector->audio_latency[1] <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
+ else
+ tmp =
+ (0 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
+ (0 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
+ } else {
+ if (connector->latency_present[0])
+ tmp =
+ (connector->video_latency[0] <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
+ (connector->audio_latency[0] <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
+ else
+ tmp =
+ (0 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
+ (0 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
+
+ }
+ WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
+static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ u32 offset, tmp;
+ u8 *sadb = NULL;
+ int sad_count;
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ sad_count = 0;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
+ if (sad_count)
+ tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
+ else
+ tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
+ WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
+ struct drm_connector *connector;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
+ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ if (sad->channels > max_channels) {
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+ max_channels = sad->channels;
+ }
+
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ stereo_freqs |= sad->freq;
+ else
+ break;
+ }
+ }
+
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
+static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
+ struct amdgpu_audio_pin *pin,
+ bool enable)
+{
+ if (!pin)
+ return;
+
+ WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
+}
+
+static const u32 pin_offsets[7] =
+{
+ (0x1780 - 0x1780),
+ (0x1786 - 0x1780),
+ (0x178c - 0x1780),
+ (0x1792 - 0x1780),
+ (0x1798 - 0x1780),
+ (0x179d - 0x1780),
+ (0x17a4 - 0x1780),
+};
+
+static int dce_v8_0_audio_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
+ adev->mode_info.audio.num_pins = 7;
+ else if ((adev->asic_type == CHIP_KABINI) ||
+ (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
+ adev->mode_info.audio.num_pins = 3;
+ else if ((adev->asic_type == CHIP_BONAIRE) ||
+ (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
+ adev->mode_info.audio.num_pins = 7;
+ else
+ adev->mode_info.audio.num_pins = 3;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].offset = pin_offsets[i];
+ adev->mode_info.audio.pin[i].id = i;
+ /* disable audio. it will be set up later */
+ /* XXX remove once we switch to ip funcs */
+ dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ return 0;
+}
+
+static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+ dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+
+ adev->mode_info.audio.enabled = false;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
+ WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+ WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
+ WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
+ uint8_t *header = buffer;
+
+ WREG32(mmAFMT_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(mmAFMT_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(mmAFMT_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(mmAFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
+
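+/* Note on the packing above: frame points past the 3-byte infoframe
+ * header, so AFMT_AVI_INFO0..3 carry the checksum and payload bytes,
+ * while the version field (header[1]) goes into the top byte of
+ * AFMT_AVI_INFO3.
+ */
+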
+static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ u32 dto_phase = 24 * 1000;
+ u32 dto_modulo = clock;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+ */
+ WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
+ WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
+}
+
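+/* Example: for a 148500 kHz pixel clock the DTO is programmed with
+ * phase = 24000 and module = 148500, i.e. the audio reference runs at
+ * 24000 / 148500 of the pixel clock, which is exactly 24 MHz.
+ */
+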
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
+ uint32_t offset, val;
+ ssize_t err;
+ int bpc = 8;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ /* hdmi deep color mode general control packets setup, if bpc > 8 */
+ if (encoder->crtc) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ bpc = amdgpu_crtc->bpc;
+ }
+
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
+ dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
+
+ dce_v8_0_audio_set_dto(encoder, mode->clock);
+
+ WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
+
+ WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ val = RREG32(mmHDMI_CONTROL + offset);
+ val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
+ val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
+
+ switch (bpc) {
+ case 0:
+ case 6:
+ case 8:
+ case 16:
+ default:
+ DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
+ connector->name, bpc);
+ break;
+ case 10:
+ val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
+ val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
+ DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
+ connector->name);
+ break;
+ case 12:
+ val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
+ val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
+ DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
+ connector->name);
+ break;
+ }
+
+ WREG32(mmHDMI_CONTROL + offset, val);
+
+ WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
+ HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
+ HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
+
+ WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
+
+ WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
+
+ WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
+ (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
+
+ WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+ WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
+ (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
+ (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
+
+ /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+ if (bpc > 8)
+ WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
+ else
+ WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
+
+ dce_v8_0_afmt_update_ACR(encoder, mode->clock);
+
+ WREG32(mmAFMT_60958_0 + offset,
+ (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
+
+ WREG32(mmAFMT_60958_1 + offset,
+ (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
+
+ WREG32(mmAFMT_60958_2 + offset,
+ (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
+ (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
+ (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
+ (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
+ (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
+ (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
+
+ dce_v8_0_audio_write_speaker_allocation(encoder);
+
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
+ (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
+
+ dce_v8_0_afmt_audio_select_pin(encoder);
+ dce_v8_0_audio_write_sad_regs(encoder);
+ dce_v8_0_audio_write_latency_fields(encoder, mode);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+ WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for avi info values to be updated */
+
+ WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
+ (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
+ ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
+
+ WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
+
+ /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
+ WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
+}
+
+static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ if (!enable && dig->afmt->pin) {
+ dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
+ dig->afmt->pin = NULL;
+ }
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->mode_info.num_dig; i++)
+ adev->mode_info.afmt[i] = NULL;
+
+ /* DCE8 has audio blocks tied to DIG encoders */
+ for (i = 0; i < adev->mode_info.num_dig; i++) {
+ adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+ if (adev->mode_info.afmt[i]) {
+ adev->mode_info.afmt[i]->offset = dig_offsets[i];
+ adev->mode_info.afmt[i]->id = i;
+ }
+ }
+}
+
+static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->mode_info.num_dig; i++) {
+ kfree(adev->mode_info.afmt[i]);
+ adev->mode_info.afmt[i] = NULL;
+ }
+}
+
+static const u32 vga_control_regs[6] =
+{
+ mmD1VGA_CONTROL,
+ mmD2VGA_CONTROL,
+ mmD3VGA_CONTROL,
+ mmD4VGA_CONTROL,
+ mmD5VGA_CONTROL,
+ mmD6VGA_CONTROL,
+};
+
+static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ u32 vga_control;
+
+ vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+ if (enable)
+ WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
+ else
+ WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+}
+
+static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ if (enable)
+ WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
+ else
+ WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v8_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw,
+ unsigned *bankh, unsigned *mtaspect,
+ unsigned *tile_split)
+{
+ *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
+ *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
+ *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
+ *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
+ switch (*bankw) {
+ default:
+ case 1:
+ *bankw = ADDR_SURF_BANK_WIDTH_1;
+ break;
+ case 2:
+ *bankw = ADDR_SURF_BANK_WIDTH_2;
+ break;
+ case 4:
+ *bankw = ADDR_SURF_BANK_WIDTH_4;
+ break;
+ case 8:
+ *bankw = ADDR_SURF_BANK_WIDTH_8;
+ break;
+ }
+ switch (*bankh) {
+ default:
+ case 1:
+ *bankh = ADDR_SURF_BANK_HEIGHT_1;
+ break;
+ case 2:
+ *bankh = ADDR_SURF_BANK_HEIGHT_2;
+ break;
+ case 4:
+ *bankh = ADDR_SURF_BANK_HEIGHT_4;
+ break;
+ case 8:
+ *bankh = ADDR_SURF_BANK_HEIGHT_8;
+ break;
+ }
+ switch (*mtaspect) {
+ default:
+ case 1:
+ *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_1;
+ break;
+ case 2:
+ *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_2;
+ break;
+ case 4:
+ *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_4;
+ break;
+ case 8:
+ *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_8;
+ break;
+ }
+}
+
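+/* The switches above rewrite the raw power-of-two values carried in the
+ * tiling flags into the corresponding ADDR_SURF_* hardware encodings,
+ * e.g. a raw bank width of 4 becomes ADDR_SURF_BANK_WIDTH_4.
+ */
+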
+static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_framebuffer *target_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *rbo;
+ uint64_t fb_location, tiling_flags;
+ uint32_t fb_format, fb_pitch_pixels;
+ unsigned bankw, bankh, mtaspect, tile_split;
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+ u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f;
+ u32 tmp, viewport_w, viewport_h;
+ int r;
+ bool bypass_lut = false;
+
+ /* no fb bound */
+ if (!atomic && !crtc->primary->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (atomic) {
+ amdgpu_fb = to_amdgpu_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ target_fb = crtc->primary->fb;
+ }
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ obj = amdgpu_fb->obj;
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ if (atomic)
+ fb_location = amdgpu_bo_gpu_offset(rbo);
+ else {
+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ }
+
+ amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+ amdgpu_bo_unreserve(rbo);
+
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ break;
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_BGRA5551:
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
+ case DRM_FORMAT_RGB565:
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_BGRA1010102:
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format));
+ return -EINVAL;
+ }
+
+ if (tiling_flags & AMDGPU_TILING_MACRO) {
+ unsigned tileb, index, num_banks, tile_split_bytes;
+
+ dce_v8_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+ /* Set NUM_BANKS. */
+ /* Calculate the macrotile mode index. */
+ tile_split_bytes = 64 << tile_split;
+ tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = min(tile_split_bytes, tileb);
+
+ for (index = 0; tileb > 64; index++) {
+ tileb >>= 1;
+ }
+
+ if (index >= 16) {
+ DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+ target_fb->bits_per_pixel, tile_split);
+ return -EINVAL;
+ }
+
+ num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3;
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
+ fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
+ } else if (tiling_flags & AMDGPU_TILING_MICRO) {
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ }
+
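+ /* Example of the index math above: a 32 bpp fb with tile_split = 2
+ * (256 bytes) gives tileb = min(256, 8 * 8 * 4) = 256, which the loop
+ * halves twice down to 64, so index = 2 selects entry 2 of the
+ * macrotile mode array for NUM_BANKS.
+ */
+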
+ /* Read the pipe config from the 2D TILED SCANOUT mode.
+ * It should be the same for the other modes too, but not all
+ * modes set the pipe config field. */
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
+
+ dce_v8_0_vga_enable(crtc, false);
+
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+ WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
+
+ /*
+ * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+ * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+ * retain the full precision throughout the pipeline.
+ */
+ WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+ (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
+ ~LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+ WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+ WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+ dce_v8_0_grph_enable(crtc, true);
+
+ WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+ target_fb->height);
+
+ x &= ~3;
+ y &= ~1;
+ WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
+ (x << 16) | y);
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+ WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+ (viewport_w << 16) | viewport_h);
+
+ /* pageflip setup */
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen only at start of vblank interval (front porch) */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+
+ if (!atomic && fb && fb != crtc->primary->fb) {
+ amdgpu_fb = to_amdgpu_framebuffer(fb);
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ dce_v8_0_bandwidth_update(adev);
+
+ return 0;
+}
+
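+/* enable line buffer interleaving when scanning out an interlaced mode */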
+static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+ LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
+ else
+ WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
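+/*
+ * Program the legacy gamma LUT for a crtc: bypass the input CSC, prescale,
+ * degamma, gamut remap and output CSC stages and load the 256 entry
+ * 10 bits-per-component LUT from the crtc's lut_r/g/b tables.
+ */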
+static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+ WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+ PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
+ WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+ PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
+ WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+
+ WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+ WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+ WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->lut_r[i] << 20) |
+ (amdgpu_crtc->lut_g[i] << 10) |
+ (amdgpu_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+ /* XXX this only needs to be programmed once per crtc at startup,
+ * not sure where the best place for it is
+ */
+ WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
+ ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
+}
+
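+/* map a UNIPHY encoder and its link (A/B) to a DIG encoder index (0-6) */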
+static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ return 6;
+ default:
+ DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+ return 0;
+ }
+}
+
+/**
+ * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+ * monitors a dedicated PPLL must be used. If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself. If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 8.x
+ * KB/KV
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
+ * CI
+ * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
+static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ u32 pll_in_use;
+ int pll;
+
+ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+		if (adev->clock.dp_extclk) {
+			/* skip PPLL programming if using ext clock */
+			return ATOM_PPLL_INVALID;
+		} else {
+ /* use the same PPLL for all DP monitors */
+ pll = amdgpu_pll_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ /* otherwise, pick one of the plls */
+ if ((adev->asic_type == CHIP_KABINI) ||
+ (adev->asic_type == CHIP_MULLINS)) {
+ /* KB/ML has PPLL1 and PPLL2 */
+ pll_in_use = amdgpu_pll_get_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ } else {
+ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
+ pll_in_use = amdgpu_pll_get_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL0)))
+ return ATOM_PPLL0;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ }
+}
+
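+/*
+ * Hold cursor register updates while the position, hot spot and size
+ * are programmed so the changes take effect together.
+ */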
+static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint32_t cur_lock;
+
+ cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
+ else
+ cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
+ WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+}
+
+static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+}
+
+static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+ uint64_t gpu_addr)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+}
+
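+/*
+ * Program the cursor position. Negative coordinates are clamped to zero
+ * and the overhang is compensated through the cursor hot spot.
+ */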
+static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ int xorigin = 0, yorigin = 0;
+
+	/* avivo cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+
+ dce_v8_0_lock_cursor(crtc, true);
+ WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+ WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+ dce_v8_0_lock_cursor(crtc, false);
+
+ return 0;
+}
+
+static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *robj;
+ uint64_t gpu_addr;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ dce_v8_0_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > amdgpu_crtc->max_cursor_width) ||
+ (height > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+ return -ENOENT;
+ }
+
+ robj = gem_to_amdgpu_bo(obj);
+ ret = amdgpu_bo_reserve(robj, false);
+ if (unlikely(ret != 0))
+ goto fail;
+ ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
+ 0, &gpu_addr);
+ amdgpu_bo_unreserve(robj);
+ if (ret)
+ goto fail;
+
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+
+ dce_v8_0_lock_cursor(crtc, true);
+ dce_v8_0_set_cursor(crtc, obj, gpu_addr);
+ dce_v8_0_show_cursor(crtc);
+ dce_v8_0_lock_cursor(crtc, false);
+
+unpin:
+ if (amdgpu_crtc->cursor_bo) {
+ robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ ret = amdgpu_bo_reserve(robj, false);
+ if (likely(ret == 0)) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
+ drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ }
+
+ amdgpu_crtc->cursor_bo = obj;
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int i, end = (start + size > 256) ? 256 : start + size;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ amdgpu_crtc->lut_r[i] = red[i] >> 6;
+ amdgpu_crtc->lut_g[i] = green[i] >> 6;
+ amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+ }
+ dce_v8_0_crtc_load_lut(crtc);
+}
+
+static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ destroy_workqueue(amdgpu_crtc->pflip_queue);
+ kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
+ .cursor_set = dce_v8_0_crtc_cursor_set,
+ .cursor_move = dce_v8_0_crtc_cursor_move,
+ .gamma_set = dce_v8_0_crtc_gamma_set,
+ .set_config = amdgpu_crtc_set_config,
+ .destroy = dce_v8_0_crtc_destroy,
+ .page_flip = amdgpu_crtc_page_flip,
+};
+
+static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ amdgpu_crtc->enabled = true;
+ amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+ dce_v8_0_vga_enable(crtc, true);
+ amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+ dce_v8_0_vga_enable(crtc, false);
+ drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ dce_v8_0_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ if (amdgpu_crtc->enabled) {
+ dce_v8_0_vga_enable(crtc, true);
+ amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+ dce_v8_0_vga_enable(crtc, false);
+ }
+ amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+ amdgpu_crtc->enabled = false;
+ break;
+ }
+ /* adjust pm to dpms */
+ amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
+{
+ /* disable crtc pair power gating before programming */
+ amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+ amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+ dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
+{
+ dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_atom_ss ss;
+ int i;
+
+ dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ int r;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_bo *rbo;
+
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ else {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ }
+ }
+ /* disable the GRPH */
+ dce_v8_0_grph_enable(crtc, false);
+
+ amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->mode_info.crtcs[i] &&
+ adev->mode_info.crtcs[i]->enabled &&
+ i != amdgpu_crtc->crtc_id &&
+ amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+ /* one other crtc is using this pll don't turn
+ * off the pll
+ */
+ goto done;
+ }
+ }
+
+ switch (amdgpu_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+ /* disable the ppll */
+ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ case ATOM_PPLL0:
+ /* disable the ppll */
+ if ((adev->asic_type == CHIP_KAVERI) ||
+ (adev->asic_type == CHIP_BONAIRE) ||
+ (adev->asic_type == CHIP_HAWAII))
+ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ default:
+ break;
+ }
+done:
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->adjusted_clock = 0;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (!amdgpu_crtc->adjusted_clock)
+ return -EINVAL;
+
+ amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+ amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+ dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+ amdgpu_atombios_crtc_scaler_setup(crtc);
+	/* update the hw version for dpm */
+ amdgpu_crtc->hw_mode = *adjusted_mode;
+
+ return 0;
+}
+
+static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ amdgpu_crtc->encoder = encoder;
+ amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ return false;
+ }
+ if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ return false;
+ if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+ return false;
+ /* pick pll */
+ amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
+ /* if we can't get a PPLL for a non-DP encoder, fail */
+ if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+ !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+ return false;
+
+ return true;
+}
+
+static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
+ .dpms = dce_v8_0_crtc_dpms,
+ .mode_fixup = dce_v8_0_crtc_mode_fixup,
+ .mode_set = dce_v8_0_crtc_mode_set,
+ .mode_set_base = dce_v8_0_crtc_set_base,
+ .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
+ .prepare = dce_v8_0_crtc_prepare,
+ .commit = dce_v8_0_crtc_commit,
+ .load_lut = dce_v8_0_crtc_load_lut,
+ .disable = dce_v8_0_crtc_disable,
+};
+
+static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ int i;
+
+ amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+ (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (amdgpu_crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+ amdgpu_crtc->crtc_id = index;
+ amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
+ adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+ amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
+ amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
+ adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+ for (i = 0; i < 256; i++) {
+ amdgpu_crtc->lut_r[i] = i << 2;
+ amdgpu_crtc->lut_g[i] = i << 2;
+ amdgpu_crtc->lut_b[i] = i << 2;
+ }
+
+ amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->adjusted_clock = 0;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int dce_v8_0_early_init(struct amdgpu_device *adev)
+{
+ adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
+ adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
+
+ dce_v8_0_set_display_funcs(adev);
+ dce_v8_0_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6; /* ? */
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dce_v8_0_sw_init(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
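+	/* pageflip interrupts use the even src ids 8..18, one per crtc */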
+ for (i = 8; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+ r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ if (r)
+ return r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* allocate crtcs */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = dce_v8_0_crtc_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
+ amdgpu_print_display_setup(adev->ddev);
+ else
+ return -EINVAL;
+
+ /* setup afmt */
+ dce_v8_0_afmt_init(adev);
+
+ r = dce_v8_0_audio_init(adev);
+ if (r)
+ return r;
+
+ drm_kms_helper_poll_init(adev->ddev);
+
+ return r;
+}
+
+static int dce_v8_0_sw_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->mode_info.bios_hardcoded_edid);
+
+ drm_kms_helper_poll_fini(adev->ddev);
+
+ dce_v8_0_audio_fini(adev);
+
+ dce_v8_0_afmt_fini(adev);
+
+ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+
+ return 0;
+}
+
+static int dce_v8_0_hw_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* init dig PHYs, disp eng pll */
+ amdgpu_atombios_encoder_init_dig(adev);
+ amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+ /* initialize hpd */
+ dce_v8_0_hpd_init(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ return 0;
+}
+
+static int dce_v8_0_hw_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ dce_v8_0_hpd_fini(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ return 0;
+}
+
+static int dce_v8_0_suspend(struct amdgpu_device *adev)
+{
+ struct drm_connector *connector;
+
+ drm_kms_helper_poll_disable(adev->ddev);
+
+ /* turn off display hw */
+ list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+
+ amdgpu_atombios_scratch_regs_save(adev);
+
+ dce_v8_0_hpd_fini(adev);
+
+ return 0;
+}
+
+static int dce_v8_0_resume(struct amdgpu_device *adev)
+{
+ struct drm_connector *connector;
+
+ amdgpu_atombios_scratch_regs_restore(adev);
+
+ /* init dig PHYs, disp eng pll */
+ amdgpu_atombios_encoder_init_dig(adev);
+ amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+ /* turn on the BL */
+ if (adev->mode_info.bl_encoder) {
+ u8 bl_level = amdgpu_display_backlight_get_level(adev,
+ adev->mode_info.bl_encoder);
+ amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+ bl_level);
+ }
+
+ /* initialize hpd */
+ dce_v8_0_hpd_init(adev);
+
+ /* blat the mode back in */
+ drm_helper_resume_force_mode(adev->ddev);
+ /* turn on display hw */
+ list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+
+ drm_kms_helper_poll_enable(adev->ddev);
+
+ return 0;
+}
+
+static bool dce_v8_0_is_idle(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dce_v8_0_wait_for_idle(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static void dce_v8_0_print_status(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, "DCE 8.x registers\n");
+ /* XXX todo */
+}
+
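+/*
+ * Soft reset the display controller block through SRBM if the display
+ * engine appears to be hung.
+ */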
+static int dce_v8_0_soft_reset(struct amdgpu_device *adev)
+{
+ u32 srbm_soft_reset = 0, tmp;
+
+ if (dce_v8_0_is_display_hung(adev))
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (srbm_soft_reset) {
+ dce_v8_0_print_status(adev);
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dce_v8_0_print_status(adev);
+ }
+ return 0;
+}
+
+static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg_block, lb_interrupt_mask;
+
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (crtc) {
+ case 0:
+ reg_block = CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ reg_block = CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ reg_block = CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ reg_block = CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ reg_block = CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ reg_block = CRTC5_REGISTER_OFFSET;
+ break;
+ default:
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
+ lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
+ WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
+ lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
+ WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg_block, lb_interrupt_mask;
+
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (crtc) {
+ case 0:
+ reg_block = CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ reg_block = CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ reg_block = CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ reg_block = CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ reg_block = CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ reg_block = CRTC5_REGISTER_OFFSET;
+ break;
+ default:
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
+ lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
+ WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
+ lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
+ WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
+ break;
+ default:
+ break;
+ }
+}
+
+static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (type) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+ break;
+ default:
+ DRM_DEBUG("invalid hdp %d\n", type);
+ return 0;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CRTC_IRQ_VBLANK1:
+ dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK2:
+ dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK3:
+ dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK4:
+ dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK5:
+ dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK6:
+ dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE1:
+ dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE2:
+ dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE3:
+ dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE4:
+ dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE5:
+ dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE6:
+ dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned crtc = entry->src_id - 1;
+ uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+ unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+ switch (entry->src_data) {
+ case 0: /* vblank */
+ if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
+ }
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ }
+ break;
+ case 1: /* vline */
+ if (disp_int & interrupt_status_offsets[crtc].vline) {
+ WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+ break;
+ }
+
+ return 0;
+}
+
+static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg, reg_block;
+ /* now deal with page flip IRQ */
+ switch (type) {
+ case AMDGPU_PAGEFLIP_IRQ_D1:
+ reg_block = CRTC0_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D2:
+ reg_block = CRTC1_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D3:
+ reg_block = CRTC2_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D4:
+ reg_block = CRTC3_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D5:
+ reg_block = CRTC4_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D6:
+ reg_block = CRTC5_REGISTER_OFFSET;
+ break;
+ default:
+ DRM_ERROR("invalid pageflip crtc %d\n", type);
+ return -EINVAL;
+ }
+
+ reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+ if (state == AMDGPU_IRQ_STATE_DISABLE)
+ WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+ else
+ WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+ return 0;
+}
+
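+/*
+ * Acknowledge a page flip interrupt, complete the flip, deliver the
+ * vblank event to userspace and queue the buffer unpin work.
+ */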
+static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int reg_block;
+ unsigned long flags;
+ unsigned crtc_id;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_flip_work *works;
+
+ crtc_id = (entry->src_id - 8) >> 1;
+ amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ /* ack the interrupt */
+	switch (crtc_id) {
+ case AMDGPU_PAGEFLIP_IRQ_D1:
+ reg_block = CRTC0_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D2:
+ reg_block = CRTC1_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D3:
+ reg_block = CRTC2_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D4:
+ reg_block = CRTC3_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D5:
+ reg_block = CRTC4_REGISTER_OFFSET;
+ break;
+ case AMDGPU_PAGEFLIP_IRQ_D6:
+ reg_block = CRTC5_REGISTER_OFFSET;
+ break;
+ default:
+ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+
+ if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+ WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+	/* the IRQ can fire before the crtc has been fully initialized */
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return 0;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+ if (works->event)
+ drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
+ queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+
+ return 0;
+}
+
+static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t disp_int, mask, int_control, tmp;
+ unsigned hpd;
+
+	if (entry->src_data >= 6) {
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+ return 0;
+ }
+
+ hpd = entry->src_data;
+ disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+ mask = interrupt_status_offsets[hpd].hpd;
+ int_control = hpd_int_control_offsets[hpd];
+
+ if (disp_int & mask) {
+ tmp = RREG32(int_control);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(int_control, tmp);
+ schedule_work(&adev->hotplug_work);
+ DRM_DEBUG("IH: HPD%d\n", hpd + 1);
+ }
+
+ return 0;
+}
+
+static int dce_v8_0_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ return 0;
+}
+
+static int dce_v8_0_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs dce_v8_0_ip_funcs = {
+ .early_init = dce_v8_0_early_init,
+ .late_init = NULL,
+ .sw_init = dce_v8_0_sw_init,
+ .sw_fini = dce_v8_0_sw_fini,
+ .hw_init = dce_v8_0_hw_init,
+ .hw_fini = dce_v8_0_hw_fini,
+ .suspend = dce_v8_0_suspend,
+ .resume = dce_v8_0_resume,
+ .is_idle = dce_v8_0_is_idle,
+ .wait_for_idle = dce_v8_0_wait_for_idle,
+ .soft_reset = dce_v8_0_soft_reset,
+ .print_status = dce_v8_0_print_status,
+ .set_clockgating_state = dce_v8_0_set_clockgating_state,
+ .set_powergating_state = dce_v8_0_set_powergating_state,
+};
+
+static void
+dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+ /* need to call this here rather than in prepare() since we need some crtc info */
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ /* set scaler clears this on some chips */
+ dce_v8_0_set_interleave(encoder->crtc, mode);
+
+ if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ dce_v8_0_afmt_enable(encoder, true);
+ dce_v8_0_afmt_setmode(encoder, adjusted_mode);
+ }
+}
+
+static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+ if ((amdgpu_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)) {
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ if (dig) {
+ dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
+ if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+ dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+ }
+ }
+
+ amdgpu_atombios_scratch_regs_lock(adev, true);
+
+ if (connector) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ /* select the clock/data port if it uses a router */
+ if (amdgpu_connector->router.cd_valid)
+ amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+ /* turn eDP panel on for mode set */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_atombios_encoder_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ amdgpu_atombios_encoder_set_crtc_source(encoder);
+ /* set up the FMT blocks */
+ dce_v8_0_program_fmt(encoder);
+}
+
+static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ /* need to call this here as we need the crtc set up */
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig;
+
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (amdgpu_atombios_encoder_is_digital(encoder)) {
+ if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ dce_v8_0_afmt_enable(encoder, false);
+ dig = amdgpu_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
+ .dpms = dce_v8_0_ext_dpms,
+ .mode_fixup = dce_v8_0_ext_mode_fixup,
+ .prepare = dce_v8_0_ext_prepare,
+ .mode_set = dce_v8_0_ext_mode_set,
+ .commit = dce_v8_0_ext_commit,
+ .disable = dce_v8_0_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
+ .dpms = amdgpu_atombios_encoder_dpms,
+ .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+ .prepare = dce_v8_0_encoder_prepare,
+ .mode_set = dce_v8_0_encoder_mode_set,
+ .commit = dce_v8_0_encoder_commit,
+ .disable = dce_v8_0_encoder_disable,
+ .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
+ .dpms = amdgpu_atombios_encoder_dpms,
+ .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+ .prepare = dce_v8_0_encoder_prepare,
+ .mode_set = dce_v8_0_encoder_mode_set,
+ .commit = dce_v8_0_encoder_commit,
+ .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+ kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
+ .destroy = dce_v8_0_encoder_destroy,
+};
+
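+/*
+ * Register a drm encoder for an ATOM object table entry; if an encoder
+ * with the same enum was already added, just merge the supported devices.
+ */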
+static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->encoder_enum == encoder_enum) {
+ amdgpu_encoder->devices |= supported_device;
+ return;
+ }
+ }
+
+ /* add a new one */
+ amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return;
+
+ encoder = &amdgpu_encoder->base;
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ amdgpu_encoder->enc_priv = NULL;
+
+ amdgpu_encoder->encoder_enum = encoder_enum;
+ amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ amdgpu_encoder->devices = supported_device;
+ amdgpu_encoder->rmx_type = RMX_OFF;
+ amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+ amdgpu_encoder->is_ext_encoder = false;
+ amdgpu_encoder->caps = caps;
+
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ amdgpu_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+ } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+ }
+ drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ /* these are handled by the primary encoders */
+ amdgpu_encoder->is_ext_encoder = true;
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ else
+ drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
+ break;
+ }
+}
+
+static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
+ .set_vga_render_state = &dce_v8_0_set_vga_render_state,
+ .bandwidth_update = &dce_v8_0_bandwidth_update,
+ .vblank_get_counter = &dce_v8_0_vblank_get_counter,
+ .vblank_wait = &dce_v8_0_vblank_wait,
+ .is_display_hung = &dce_v8_0_is_display_hung,
+ .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+ .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+ .hpd_sense = &dce_v8_0_hpd_sense,
+ .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
+ .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
+ .page_flip = &dce_v8_0_page_flip,
+ .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
+ .add_encoder = &dce_v8_0_encoder_add,
+ .add_connector = &amdgpu_connector_add,
+ .stop_mc_access = &dce_v8_0_stop_mc_access,
+ .resume_mc_access = &dce_v8_0_resume_mc_access,
+};
+
+static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dce_v8_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
+ .set = dce_v8_0_set_crtc_interrupt_state,
+ .process = dce_v8_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
+ .set = dce_v8_0_set_pageflip_interrupt_state,
+ .process = dce_v8_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
+ .set = dce_v8_0_set_hpd_interrupt_state,
+ .process = dce_v8_0_hpd_irq,
+};
+
+static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
+
+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
new file mode 100644
index 000000000000..3a0a73b41041
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_V8_0_H__
+#define __DCE_V8_0_H__
+
+extern const struct amdgpu_ip_funcs dce_v8_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
new file mode 100644
index 000000000000..675b096417f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -0,0 +1,5635 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "cikd.h"
+#include "cik.h"
+#include "atom.h"
+#include "amdgpu_ucode.h"
+#include "clearstate_ci.h"
+
+#include "uvd/uvd_4_2_d.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_0_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_0_d.h"
+#include "gmc/gmc_7_0_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+#define GFX7_NUM_GFX_RINGS 1
+#define GFX7_NUM_COMPUTE_RINGS 8
+
+static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
+int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
+
+MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+MODULE_FIRMWARE("radeon/bonaire_me.bin");
+MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+
+MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+MODULE_FIRMWARE("radeon/hawaii_me.bin");
+MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+
+MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+MODULE_FIRMWARE("radeon/kaveri_me.bin");
+MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+
+MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+MODULE_FIRMWARE("radeon/kabini_me.bin");
+MODULE_FIRMWARE("radeon/kabini_ce.bin");
+MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+MODULE_FIRMWARE("radeon/kabini_mec.bin");
+
+MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+MODULE_FIRMWARE("radeon/mullins_me.bin");
+MODULE_FIRMWARE("radeon/mullins_ce.bin");
+MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+MODULE_FIRMWARE("radeon/mullins_mec.bin");
+
+static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
+{
+ {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
+ {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
+ {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
+ {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
+ {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
+ {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
+ {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
+ {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
+ {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
+ {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
+ {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
+ {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
+ {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
+ {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
+ {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
+ {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
+};
+
+static const u32 spectre_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc178 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc278 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc27c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc280 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc284 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc288 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc29c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc778 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc77c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc780 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc784 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc788 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc78c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a4 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a8 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7ac >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92cc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
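+/*
+ * Layout note (inferred from how these save/restore lists are built,
+ * not from documented RLC ucode internals): each entry appears to pack
+ * a SE/SH selector in the upper 16 bits (0x0e00 for broadcast,
+ * 0x4e00-0xbe00 for individual instances) with a register dword offset
+ * (reg >> 2) in the lower 16 bits, followed by a 0x00000000 placeholder
+ * for the saved value; the bare counts (0x3, 0x5) appear to delimit
+ * groups of special-case registers.
+ */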
+static const u32 kalindi_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3e1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
+static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
+static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
+
+/*
+ * Core functions
+ */
+/**
+ * gfx_v7_0_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
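+ /* CIK shares firmware with radeon: the CP blobs (PFP, ME, CE, MEC,
+  * plus MEC2 on Kaveri only) and the RLC ucode are all fetched from
+  * the radeon/ firmware directory below. */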
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ chip_name = "bonaire";
+ break;
+ case CHIP_HAWAII:
+ chip_name = "hawaii";
+ break;
+ case CHIP_KAVERI:
+ chip_name = "kaveri";
+ break;
+ case CHIP_KABINI:
+ chip_name = "kabini";
+ break;
+ case CHIP_MULLINS:
+ chip_name = "mullins";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ if (err)
+ goto out;
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ if (err)
+ goto out;
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ if (err)
+ goto out;
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ if (err)
+ goto out;
+
+ if (adev->asic_type == CHIP_KAVERI) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ if (err)
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+
+out:
+ if (err) {
+ printk(KERN_ERR
+ "gfx7: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ }
+ return err;
+}
+
+/**
+ * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Starting with SI, the tiling setup is done globally in a
+ * set of 32 tiling modes. Rather than selecting each set of
+ * parameters per surface as on older asics, we just select
+ * which index in the tiling table we want to use, and the
+ * surface uses those parameters (CIK).
+ */
+static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+ const u32 num_tile_mode_states = 32;
+ const u32 num_secondary_tile_mode_states = 16;
+ u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
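+ /* Each computed gb_tile_moden is cached in adev->gfx.config for later
+  * use by the driver and written to the corresponding GB_TILE_MODE /
+  * GB_MACROTILE_MODE register. */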
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+ break;
+ case 2:
+ default:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+ break;
+ case 4:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+ break;
+ }
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 7:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 12:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 15:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 17:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 19:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ break;
+ case 20:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 21:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 22:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 23:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 24:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 25:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 26:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 27:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ break;
+ case 28:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 29:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 30:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 4:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 5:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 9:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 10:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 14:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ break;
+ case CHIP_HAWAII:
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 7:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 12:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 15:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 17:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 18:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 19:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
+ break;
+ case 20:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 21:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 22:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 23:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 24:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 25:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 26:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 27:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ break;
+ case 28:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 29:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 30:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 4:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 5:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 9:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 10:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 14:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ break;
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ case CHIP_MULLINS:
+ default:
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 7:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 12:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 15:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 17:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 19:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
+ break;
+ case 20:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 21:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 22:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 23:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 24:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 25:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 26:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ break;
+ case 27:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ break;
+ case 28:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ break;
+ case 29:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ break;
+ case 30:
+ gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 4:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 5:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 9:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 10:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 14:
+ gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ break;
+ }
+}
+
+/**
+ * gfx_v7_0_select_se_sh - select which SE, SH to address
+ *
+ * @adev: amdgpu_device pointer
+ * @se_num: shader engine to address
+ * @sh_num: sh block to address
+ *
+ * Select which SE, SH combinations to address. Certain
+ * registers are instanced per SE or SH. 0xffffffff means
+ * broadcast to all SEs or SHs (CIK).
+ */
+void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+{
+ u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+ else if (se_num == 0xffffffff)
+ data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
+ (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
+ else if (sh_num == 0xffffffff)
+ data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ else
+ data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
+ (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ WREG32(mmGRBM_GFX_INDEX, data);
+}
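+/*
+ * Typical usage (see gfx_v7_0_setup_rb() below): select one SE/SH
+ * instance, access the instanced registers, then restore broadcast mode
+ * with gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff), all under
+ * adev->grbm_idx_mutex.
+ */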
+
+/**
+ * gfx_v7_0_create_bitmask - create a bitmask
+ *
+ * @bit_width: length of the mask
+ *
+ * Create a variable-length bit mask (CIK).
+ * Returns the bitmask.
+ */
+static u32 gfx_v7_0_create_bitmask(u32 bit_width)
+{
+ u32 i, mask = 0;
+
+ for (i = 0; i < bit_width; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+ return mask;
+}
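+/* For example, gfx_v7_0_create_bitmask(4) returns 0xf; for bit_width < 32
+ * this is equivalent to (1u << bit_width) - 1. */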
+
+/**
+ * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs
+ *
+ * @adev: amdgpu_device pointer
+ * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
+ * @sh_per_se: number of SH blocks per SE for the asic
+ *
+ * Calculates the bitmask of disabled RBs (CIK).
+ * Returns the disabled RB bitmask.
+ */
+static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
+ u32 max_rb_num_per_se,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
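+ /* CC_RB_BACKEND_DISABLE holds the factory-harvested RBs and
+  * GC_USER_RB_BACKEND_DISABLE any additionally disabled ones; OR them
+  * together and mask to the RBs present on one SH. */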
+ data = RREG32(mmCC_RB_BACKEND_DISABLE);
+ if (data & 1)
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
+ else
+ data = 0;
+
+ data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+
+ data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+
+ mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+ return data & mask;
+}
+
+/**
+ * gfx_v7_0_setup_rb - setup the RBs on the asic
+ *
+ * @adev: amdgpu_device pointer
+ * @se_num: number of SEs (shader engines) for the asic
+ * @sh_per_se: number of SH blocks per SE for the asic
+ * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
+ *
+ * Configures per-SE/SH RB registers (CIK).
+ */
+static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num_per_se)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+
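+ /* First pass: walk every SE/SH and pack the per-SH disabled-RB masks
+  * into a single word, HAWAII_RB_BITMAP_WIDTH_PER_SH or
+  * CIK_RB_BITMAP_WIDTH_PER_SH bits per SH. */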
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ gfx_v7_0_select_se_sh(adev, i, j);
+ data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+ if (adev->asic_type == CHIP_HAWAII)
+ disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
+ else
+ disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ adev->gfx.config.backend_enable_mask = enabled_rbs;
+
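+ /* Second pass: program PA_SC_RASTER_CONFIG per SE, choosing a
+  * packer-to-RB mapping based on which RBs are enabled (two bits of
+  * enabled_rbs are consumed per SH iteration). */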
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 0:
+ if (j == 0)
+ data |= (RASTER_CONFIG_RB_MAP_3 <<
+ PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
+ else
+ data |= (RASTER_CONFIG_RB_MAP_0 <<
+ PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
+ break;
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ WREG32(mmPA_SC_RASTER_CONFIG, data);
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/**
+ * gfx_v7_0_gpu_init - setup the 3D engine
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Configures the 3D engine and tiling configuration
+ * registers so that the 3D engine is usable.
+ */
+static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
+{
+ u32 gb_addr_config;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
+ u32 tmp;
+ int i;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->gfx.config.max_shader_engines = 2;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 7;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_HAWAII:
+ adev->gfx.config.max_shader_engines = 4;
+ adev->gfx.config.max_tile_pipes = 16;
+ adev->gfx.config.max_cu_per_sh = 11;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 16;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_KAVERI:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ if ((adev->pdev->device == 0x1304) ||
+ (adev->pdev->device == 0x1305) ||
+ (adev->pdev->device == 0x130C) ||
+ (adev->pdev->device == 0x130F) ||
+ (adev->pdev->device == 0x1310) ||
+ (adev->pdev->device == 0x1311) ||
+ (adev->pdev->device == 0x131C)) {
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_backends_per_se = 2;
+ } else if ((adev->pdev->device == 0x1309) ||
+ (adev->pdev->device == 0x130A) ||
+ (adev->pdev->device == 0x130D) ||
+ (adev->pdev->device == 0x1313) ||
+ (adev->pdev->device == 0x131D)) {
+ adev->gfx.config.max_cu_per_sh = 6;
+ adev->gfx.config.max_backends_per_se = 2;
+ } else if ((adev->pdev->device == 0x1306) ||
+ (adev->pdev->device == 0x1307) ||
+ (adev->pdev->device == 0x130B) ||
+ (adev->pdev->device == 0x130E) ||
+ (adev->pdev->device == 0x1315) ||
+ (adev->pdev->device == 0x131B)) {
+ adev->gfx.config.max_cu_per_sh = 4;
+ adev->gfx.config.max_backends_per_se = 1;
+ } else {
+ adev->gfx.config.max_cu_per_sh = 3;
+ adev->gfx.config.max_backends_per_se = 1;
+ }
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ default:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 2;
+ adev->gfx.config.max_cu_per_sh = 2;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 2;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ }
+
+ WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
+
+ mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+ adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+ mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+ adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+ adev->gfx.config.mem_max_burst_length_bytes = 256;
+ if (adev->flags & AMDGPU_IS_APU) {
+ /* Get memory bank mapping mode. */
+ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+ dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+ dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+ tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+ dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+ dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+ /* Validate settings in case only one DIMM is installed. */
+ if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+ dimm00_addr_map = 0;
+ if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+ dimm01_addr_map = 0;
+ if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+ dimm10_addr_map = 0;
+ if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+ dimm11_addr_map = 0;
+
+ /* If a DIMM address map is 8GB, the row size should be 2KB; otherwise 1KB. */
+ /* If row size(DIMM1) != row size(DIMM0), use the larger of the two. */
+ if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+ adev->gfx.config.mem_row_size_in_kb = 2;
+ else
+ adev->gfx.config.mem_row_size_in_kb = 1;
+ } else {
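+ /* Discrete cards: row size in bytes is 4 * 2^(8 + NOOFCOLS), so
+  * NOOFCOLS == 0 gives 1KB and NOOFCOLS == 2 gives 4KB; clamped to
+  * 4KB below. */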
+ tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
+ adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (adev->gfx.config.mem_row_size_in_kb > 4)
+ adev->gfx.config.mem_row_size_in_kb = 4;
+ }
+ /* XXX use MC settings? */
+ adev->gfx.config.shader_engine_tile_size = 32;
+ adev->gfx.config.num_gpus = 1;
+ adev->gfx.config.multi_gpu_tile_size = 64;
+
+ /* fix up row size */
+ gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+ break;
+ case 2:
+ gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+ break;
+ case 4:
+ gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+ break;
+ }
+ adev->gfx.config.gb_addr_config = gb_addr_config;
+
+ WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
+ WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
+ WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+ gfx_v7_0_tiling_mode_table_init(adev);
+
+ gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_backends_per_se);
+
+ /* set HW defaults for 3D engine */
+ WREG32(mmCP_MEQ_THRESHOLDS,
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ /*
+	 * make sure that the following register writes are broadcast
+	 * to all the shaders
+ */
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+
+ /* XXX SH_MEM regs */
+ /* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&adev->srbm_mutex);
+ for (i = 0; i < 16; i++) {
+ cik_srbm_select(adev, 0, 0, 0, i);
+ /* CP and shaders */
+ WREG32(mmSH_MEM_CONFIG, 0);
+ WREG32(mmSH_MEM_APE1_BASE, 1);
+ WREG32(mmSH_MEM_APE1_LIMIT, 0);
+ WREG32(mmSH_MEM_BASES, 0);
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ WREG32(mmSX_DEBUG_1, 0x20);
+
+ WREG32(mmTA_CNTL_AUX, 0x00010000);
+
+ tmp = RREG32(mmSPI_CONFIG_CNTL);
+ tmp |= 0x03000000;
+ WREG32(mmSPI_CONFIG_CNTL, tmp);
+
+ WREG32(mmSQ_CONFIG, 1);
+
+ WREG32(mmDB_DEBUG, 0);
+
+ tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
+ tmp |= 0x00000400;
+ WREG32(mmDB_DEBUG2, tmp);
+
+ tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
+ tmp |= 0x00020200;
+ WREG32(mmDB_DEBUG3, tmp);
+
+ tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
+ tmp |= 0x00018208;
+ WREG32(mmCB_HW_CONTROL, tmp);
+
+ WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
+
+ WREG32(mmPA_SC_FIFO_SIZE,
+ ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
+
+ WREG32(mmVGT_NUM_INSTANCES, 1);
+
+ WREG32(mmCP_PERFMON_CNTL, 0);
+
+ WREG32(mmSQ_CONFIG, 0);
+
+ WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
+ ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
+ (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
+
+ WREG32(mmVGT_CACHE_INVALIDATION,
+ (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
+ (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
+
+ WREG32(mmVGT_GS_VERTEX_REUSE, 16);
+ WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
+ (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
+ WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ udelay(50);
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+/**
+ * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the number and offset of the CP scratch registers.
+ * NOTE: use of CP scratch registers is a legacy interface and
+ * is not used by default on newer asics (r6xx+). On newer asics,
+ * memory buffers are used for fences rather than scratch regs.
+ */
+static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
+ for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+ adev->gfx.scratch.free[i] = true;
+ adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+ }
+}
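+
+/*
+ * A minimal sketch (illustrative only, assuming the simple free-list
+ * layout initialized above) of how amdgpu_gfx_scratch_get() can hand
+ * out one of these registers; the real helper lives elsewhere in the
+ * driver and this function is not referenced by it.
+ */
+static int gfx_v7_0_scratch_get_sketch(struct amdgpu_device *adev,
+				       uint32_t *reg)
+{
+	int i;
+
+	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+		if (adev->gfx.scratch.free[i]) {
+			/* claim the first free register */
+			adev->gfx.scratch.free[i] = false;
+			*reg = adev->gfx.scratch.reg[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}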
+
+/**
+ * gfx_v7_0_ring_test_ring - basic gfx ring test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate a scratch register and write to it using the gfx ring (CIK).
+ * Provides a basic gfx ring test to verify that the ring is working.
+ * Used by gfx_v7_0_cp_gfx_resume().
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = amdgpu_ring_lock(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_unlock_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ring->idx, scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+/**
+ * gfx_v7_0_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an HDP flush on the CP ring.
+ */
+static void gfx_v7_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring)
+{
+ u32 ref_and_mask;
+
+ if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ switch (ring->me) {
+ case 1:
+ ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+ break;
+ case 2:
+ ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ } else {
+ ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+ WAIT_REG_MEM_FUNCTION(3) | /* == */
+ WAIT_REG_MEM_ENGINE(1))); /* pfp */
+ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
+ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
+ amdgpu_ring_write(ring, ref_and_mask);
+ amdgpu_ring_write(ring, ref_and_mask);
+ amdgpu_ring_write(ring, 0x20); /* poll interval */
+}
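+
+/*
+ * For reference (not in the original patch): the WAIT_REG_MEM packet
+ * above carries six payload dwords (control, poll register, done
+ * register, reference value, mask, poll interval), so a frame that
+ * reserves room for an HDP flush must budget 7 dwords, matching the
+ * "next_rptr += 7" accounting in gfx_v7_0_ring_emit_ib() below.
+ */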
+
+/**
+ * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @addr: GPU address where the fence sequence number is written
+ * @seq: sequence number to emit
+ * @write64bit: true to write the sequence number as a 64-bit value
+ *
+ * Emits a fence sequence number on the gfx ring and flushes
+ * GPU caches.
+ */
+static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, bool write64bit)
+{
+	/* Workaround for cache flush problems: first send a dummy EOP
+	 * event down the pipe with a sequence number one below the real one.
+ */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
+ amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+ amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+ /* Then send the real EOP event down the pipe. */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
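+
+/*
+ * A hedged note on the fields above (a reading of the code, not from
+ * the original patch): DATA_SEL selects what the CP writes back
+ * (1 = 32-bit seq, 2 = 64-bit seq) and INT_SEL selects the interrupt
+ * behaviour, which is why the dummy EOP uses INT_SEL(0) while only the
+ * real fence write raises an interrupt via INT_SEL(2).
+ */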
+
+/**
+ * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @addr: GPU address where the fence sequence number is written
+ * @seq: sequence number to emit
+ * @write64bits: true to write the sequence number as a 64-bit value
+ *
+ * Emits a fence sequence number on the compute ring and flushes
+ * GPU caches.
+ */
+static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
+ u64 addr, u64 seq,
+ bool write64bits)
+{
+ /* RELEASE_MEM - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+/**
+ * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
+ *
+ * @ring: amdgpu ring buffer object
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
+static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+ struct amdgpu_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+ unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+ amdgpu_ring_write(ring, addr & 0xffffffff);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+ if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
+ /* Prevent the PFP from running ahead of the semaphore wait */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ amdgpu_ring_write(ring, 0x0);
+ }
+
+ return true;
+}
+
+/*
+ * IB stuff
+ */
+/**
+ * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: amdgpu indirect buffer object
+ *
+ * Emits a DE (drawing engine) or CE (constant engine) IB
+ * on the gfx ring. IBs are usually generated by userspace
+ * acceleration drivers and submitted to the kernel for
+ * scheduling on the ring. This function schedules the IB
+ * on the gfx ring for execution by the GPU.
+ */
+static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+	u32 next_rptr = ring->wptr + 5;
+
+	if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
+ control |= INDIRECT_BUFFER_VALID;
+
+ if (ib->flush_hdp_writefifo)
+ next_rptr += 7;
+
+ if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ next_rptr += 2;
+
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ if (ib->flush_hdp_writefifo)
+ gfx_v7_0_hdp_flush_cp_ring_emit(ring);
+
+ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+ if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ ring->need_ctx_switch = false;
+ }
+
+ if (ib->is_const_ib)
+ header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
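+
+/*
+ * Layout of the INDIRECT_BUFFER control dword built above (a hedged
+ * reading of the code, not from the patch): the low bits carry the IB
+ * length in dwords, the shift-by-24 places the VMID the IB executes
+ * under, and INDIRECT_BUFFER_VALID marks compute IBs as dispatchable.
+ */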
+
+/**
+ * gfx_v7_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (CIK).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_ib_free(adev, &ib);
+ DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = amdgpu_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_ib_free(adev, &ib);
+ return r;
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+ ib.fence->ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_ib_free(adev, &ib);
+ return r;
+}
+
+/*
+ * CP.
+ * On CIK, gfx and compute now have independent command processors.
+ *
+ * GFX
+ * Gfx consists of a single ring and can process both gfx jobs and
+ * compute jobs. The gfx CP consists of three microengines (ME):
+ * PFP - Pre-Fetch Parser
+ * ME - Micro Engine
+ * CE - Constant Engine
+ * The PFP and ME make up what is considered the Drawing Engine (DE).
+ * The CE is an asynchronous engine used for updating buffer descriptors
+ * used by the DE so that they can be loaded into cache in parallel
+ * while the DE is processing state update packets.
+ *
+ * Compute
+ * The compute CP consists of two microengines (ME):
+ * MEC1 - Compute MicroEngine 1
+ * MEC2 - Compute MicroEngine 2
+ * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
+ * The queues are exposed to userspace and are programmed directly
+ * by the compute runtime.
+ */
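+
+/*
+ * A minimal sketch of the queue arithmetic described above
+ * (illustrative only; this helper is not referenced by the driver):
+ */
+static unsigned gfx_v7_0_total_compute_queues(unsigned num_mec)
+{
+	const unsigned pipes_per_mec = 4, queues_per_pipe = 8;
+
+	/* 2 MECs on KV give 64 queues; 1 MEC on CI/KB gives 32 */
+	return num_mec * pipes_per_mec * queues_per_pipe;
+}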
+/**
+ * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halts or unhalts the gfx MEs.
+ */
+static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+ int i;
+
+ if (enable) {
+ WREG32(mmCP_ME_CNTL, 0);
+ } else {
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].ready = false;
+ }
+ udelay(50);
+}
+
+/**
+ * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the gfx PFP, ME, and CE ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+ const struct gfx_firmware_header_v1_0 *pfp_hdr;
+ const struct gfx_firmware_header_v1_0 *ce_hdr;
+ const struct gfx_firmware_header_v1_0 *me_hdr;
+ const __le32 *fw_data;
+ unsigned i, fw_size;
+
+ if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+ return -EINVAL;
+
+ pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+ amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+ adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
+ adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
+ adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
+
+ gfx_v7_0_cp_gfx_enable(adev, false);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(mmCP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
+
+ return 0;
+}
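+
+/*
+ * The three loads above share one pattern; a hedged sketch of it
+ * follows (the address/data register pair differs per engine and the
+ * helper name here is illustrative, not part of the patch):
+ */
+static void gfx_v7_0_load_ucode_sketch(struct amdgpu_device *adev,
+				       u32 addr_reg, u32 data_reg,
+				       const __le32 *fw_data,
+				       unsigned fw_size, u32 fw_version)
+{
+	unsigned i;
+
+	WREG32(addr_reg, 0);		/* rewind the ucode write pointer */
+	for (i = 0; i < fw_size; i++)	/* stream the ucode dwords */
+		WREG32(data_reg, le32_to_cpup(fw_data++));
+	WREG32(addr_reg, fw_version);	/* finish by writing the version */
+}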
+
+/**
+ * gfx_v7_0_cp_gfx_start - start the gfx ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enables the ring and loads the clear state context and other
+ * packets required to init the ring.
+ * Returns 0 for success, error for failure.
+ */
+static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ int r, i;
+
+ /* init the CP */
+ WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
+ WREG32(mmCP_ENDIAN_SWAP, 0);
+ WREG32(mmCP_DEVICE_ID, 1);
+
+ gfx_v7_0_cp_gfx_enable(adev, true);
+
+ r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* init the CE partitions. CE only used for gfx on CIK */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+ amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+ amdgpu_ring_write(ring, 0x8000);
+ amdgpu_ring_write(ring, 0x8000);
+
+ /* clear state buffer */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, 0x80000000);
+ amdgpu_ring_write(ring, 0x80000000);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ amdgpu_ring_write(ring,
+ PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+ for (i = 0; i < ext->reg_count; i++)
+ amdgpu_ring_write(ring, ext->extent[i]);
+ }
+ }
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_ring_write(ring, 0x16000012);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
+ case CHIP_KAVERI:
+ amdgpu_ring_write(ring, 0x00000000); /* XXX */
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ amdgpu_ring_write(ring, 0x00000000); /* XXX */
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
+ case CHIP_HAWAII:
+ amdgpu_ring_write(ring, 0x3a00161a);
+ amdgpu_ring_write(ring, 0x0000002e);
+ break;
+ default:
+ amdgpu_ring_write(ring, 0x00000000);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ amdgpu_ring_write(ring, 0x00000316);
+ amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+ amdgpu_ring_unlock_commit(ring);
+
+ return 0;
+}
+
+/**
+ * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Program the location and size of the gfx ring buffer
+ * and test it to make sure it's working.
+ * Returns 0 for success, error for failure.
+ */
+static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ u64 rb_addr, rptr_addr;
+ int r;
+
+ WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
+ if (adev->asic_type != CHIP_HAWAII)
+ WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(mmCP_RB_WPTR_DELAY, 0);
+
+ /* set the RB to use vmid 0 */
+ WREG32(mmCP_RB_VMID, 0);
+
+ WREG32(mmSCRATCH_ADDR, 0);
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ ring = &adev->gfx.gfx_ring[0];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(mmCP_RB0_CNTL, tmp);
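+
+	/*
+	 * For reference (not in the original patch): RB_BUFSZ is the log2
+	 * of the ring size in 8-byte quadwords, so e.g. a 1MB ring gives
+	 * order_base_2(1048576 / 8) = 17, while the value shifted in at
+	 * bit 8 (RB_BLKSZ) encodes the rptr writeback granularity the
+	 * same way.
+	 */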
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
+ ring->wptr = 0;
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ /* scratch register shadowing is no longer supported */
+ WREG32(mmSCRATCH_UMSK, 0);
+
+ mdelay(1);
+ WREG32(mmCP_RB0_CNTL, tmp);
+
+ rb_addr = ring->gpu_addr >> 8;
+ WREG32(mmCP_RB0_BASE, rb_addr);
+ WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
+
+ /* start the ring */
+ gfx_v7_0_cp_gfx_start(adev);
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+{
+ u32 rptr;
+
+ rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+ return rptr;
+}
+
+static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 wptr;
+
+ wptr = RREG32(mmCP_RB0_WPTR);
+
+ return wptr;
+}
+
+static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB0_WPTR);
+}
+
+static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
+{
+ u32 rptr;
+
+ rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+ return rptr;
+}
+
+static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
+{
+ u32 wptr;
+
+ /* XXX check if swapping is necessary on BE */
+ wptr = ring->adev->wb.wb[ring->wptr_offs];
+
+ return wptr;
+}
+
+static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* XXX check if swapping is necessary on BE */
+ adev->wb.wb[ring->wptr_offs] = ring->wptr;
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
+}
+
+/**
+ * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halts or unhalts the compute MEs.
+ */
+static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+{
+ int i;
+
+ if (enable) {
+ WREG32(mmCP_MEC_CNTL, 0);
+ } else {
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].ready = false;
+ }
+ udelay(50);
+}
+
+/**
+ * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the compute MEC1&2 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
+{
+ const struct gfx_firmware_header_v1_0 *mec_hdr;
+ const __le32 *fw_data;
+ unsigned i, fw_size;
+
+ if (!adev->gfx.mec_fw)
+ return -EINVAL;
+
+ mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+ adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+
+ gfx_v7_0_cp_compute_enable(adev, false);
+
+ /* MEC1 */
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+ WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
+
+ if (adev->asic_type == CHIP_KAVERI) {
+ const struct gfx_firmware_header_v1_0 *mec2_hdr;
+
+ if (!adev->gfx.mec2_fw)
+ return -EINVAL;
+
+ mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
+ adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+
+ /* MEC2 */
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+ WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * gfx_v7_0_cp_compute_start - start the compute queues
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the compute queues.
+ * Returns 0 for success, error for failure.
+ */
+static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev)
+{
+ gfx_v7_0_cp_compute_enable(adev, true);
+
+ return 0;
+}
+
+/**
+ * gfx_v7_0_cp_compute_fini - stop the compute queues
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute queues and tear down the driver queue
+ * info.
+ */
+static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ if (ring->mqd_obj) {
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
+
+ amdgpu_bo_unpin(ring->mqd_obj);
+ amdgpu_bo_unreserve(ring->mqd_obj);
+
+ amdgpu_bo_unref(&ring->mqd_obj);
+ ring->mqd_obj = NULL;
+ }
+ }
+}
+
+static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gfx.mec.hpd_eop_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+
+ amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
+ adev->gfx.mec.hpd_eop_obj = NULL;
+ }
+}
+
+#define MEC_HPD_SIZE 2048
+
+static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
+{
+ int r;
+ u32 *hpd;
+
+ /*
+ * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
+ * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
+ * Nonetheless, we assign only 1 pipe because all other pipes will
+ * be handled by KFD
+ */
+ adev->gfx.mec.num_mec = 1;
+ adev->gfx.mec.num_pipe = 1;
+ adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
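+	/*
+	 * Sizing note (not part of the patch): with num_mec = num_pipe = 1
+	 * the EOP allocation below is 1 * 1 * MEC_HPD_SIZE * 2 = 4096
+	 * bytes, i.e. a single page of GTT for the hardware-polled EOP
+	 * buffers.
+	 */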
+
+ if (adev->gfx.mec.hpd_eop_obj == NULL) {
+ r = amdgpu_bo_create(adev,
+ adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+ PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ &adev->gfx.mec.hpd_eop_obj);
+ if (r) {
+			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v7_0_mec_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_gpu_addr);
+ if (r) {
+		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
+ gfx_v7_0_mec_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
+ if (r) {
+		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
+ gfx_v7_0_mec_fini(adev);
+ return r;
+ }
+
+	/* clear the memory; it is unclear whether this is required */
+ memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+
+ return 0;
+}
+
+struct hqd_registers {
+ u32 cp_mqd_base_addr;
+ u32 cp_mqd_base_addr_hi;
+ u32 cp_hqd_active;
+ u32 cp_hqd_vmid;
+ u32 cp_hqd_persistent_state;
+ u32 cp_hqd_pipe_priority;
+ u32 cp_hqd_queue_priority;
+ u32 cp_hqd_quantum;
+ u32 cp_hqd_pq_base;
+ u32 cp_hqd_pq_base_hi;
+ u32 cp_hqd_pq_rptr;
+ u32 cp_hqd_pq_rptr_report_addr;
+ u32 cp_hqd_pq_rptr_report_addr_hi;
+ u32 cp_hqd_pq_wptr_poll_addr;
+ u32 cp_hqd_pq_wptr_poll_addr_hi;
+ u32 cp_hqd_pq_doorbell_control;
+ u32 cp_hqd_pq_wptr;
+ u32 cp_hqd_pq_control;
+ u32 cp_hqd_ib_base_addr;
+ u32 cp_hqd_ib_base_addr_hi;
+ u32 cp_hqd_ib_rptr;
+ u32 cp_hqd_ib_control;
+ u32 cp_hqd_iq_timer;
+ u32 cp_hqd_iq_rptr;
+ u32 cp_hqd_dequeue_request;
+ u32 cp_hqd_dma_offload;
+ u32 cp_hqd_sema_cmd;
+ u32 cp_hqd_msg_type;
+ u32 cp_hqd_atomic0_preop_lo;
+ u32 cp_hqd_atomic0_preop_hi;
+ u32 cp_hqd_atomic1_preop_lo;
+ u32 cp_hqd_atomic1_preop_hi;
+ u32 cp_hqd_hq_scheduler0;
+ u32 cp_hqd_hq_scheduler1;
+ u32 cp_mqd_control;
+};
+
+struct bonaire_mqd {
+ u32 header;
+ u32 dispatch_initiator;
+ u32 dimensions[3];
+ u32 start_idx[3];
+ u32 num_threads[3];
+ u32 pipeline_stat_enable;
+ u32 perf_counter_enable;
+ u32 pgm[2];
+ u32 tba[2];
+ u32 tma[2];
+ u32 pgm_rsrc[2];
+ u32 vmid;
+ u32 resource_limits;
+ u32 static_thread_mgmt01[2];
+ u32 tmp_ring_size;
+ u32 static_thread_mgmt23[2];
+ u32 restart[3];
+ u32 thread_trace_enable;
+ u32 reserved1;
+ u32 user_data[16];
+ u32 vgtcs_invoke_count[2];
+ struct hqd_registers queue_state;
+ u32 dequeue_cntr;
+ u32 interrupt_queue[64];
+};
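+
+/*
+ * A hedged note (not from the original patch): the MQD above is the
+ * in-memory image of one compute queue. The CP consumes it when a
+ * queue is mapped onto a pipe, which is why gfx_v7_0_cp_compute_resume()
+ * below mirrors every HQD register write into queue_state as well.
+ */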
+
+/**
+ * gfx_v7_0_cp_compute_resume - setup the compute queue registers
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Program the compute queues and test them to make sure they
+ * are working.
+ * Returns 0 for success, error for failure.
+ */
+static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+ int r, i, j;
+ u32 tmp;
+ bool use_doorbell = true;
+ u64 hqd_gpu_addr;
+ u64 mqd_gpu_addr;
+ u64 eop_gpu_addr;
+ u64 wb_gpu_addr;
+ u32 *buf;
+ struct bonaire_mqd *mqd;
+
+ r = gfx_v7_0_cp_compute_start(adev);
+ if (r)
+ return r;
+
+ /* fix up chicken bits */
+ tmp = RREG32(mmCP_CPF_DEBUG);
+ tmp |= (1 << 23);
+ WREG32(mmCP_CPF_DEBUG, tmp);
+
+ /* init the pipes */
+ mutex_lock(&adev->srbm_mutex);
+ for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
+ int me = (i < 4) ? 1 : 2;
+ int pipe = (i < 4) ? i : (i - 4);
+
+ eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
+
+ cik_srbm_select(adev, me, pipe, 0, 0);
+
+ /* write the EOP addr */
+ WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+ WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+
+ /* set the VMID assigned */
+ WREG32(mmCP_HPD_EOP_VMID, 0);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32(mmCP_HPD_EOP_CONTROL);
+ tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
+ tmp |= order_base_2(MEC_HPD_SIZE / 8);
+ WREG32(mmCP_HPD_EOP_CONTROL, tmp);
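+		/*
+		 * Worked out (not in the patch): MEC_HPD_SIZE / 8 = 256, so
+		 * EOP_SIZE = 8 and the hardware sees 2^(8+1) = 512 dwords,
+		 * i.e. exactly the 2048-byte buffer reserved per pipe.
+		 */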
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ /* init the queues. Just two for now. */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ if (ring->mqd_obj == NULL) {
+ r = amdgpu_bo_create(adev,
+ sizeof(struct bonaire_mqd),
+ PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ &ring->mqd_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v7_0_cp_compute_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
+ &mqd_gpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
+ gfx_v7_0_cp_compute_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
+ gfx_v7_0_cp_compute_fini(adev);
+ return r;
+ }
+
+ /* init the mqd struct */
+ memset(buf, 0, sizeof(struct bonaire_mqd));
+
+ mqd = (struct bonaire_mqd *)buf;
+ mqd->header = 0xC0310800;
+ mqd->static_thread_mgmt01[0] = 0xffffffff;
+ mqd->static_thread_mgmt01[1] = 0xffffffff;
+ mqd->static_thread_mgmt23[0] = 0xffffffff;
+ mqd->static_thread_mgmt23[1] = 0xffffffff;
+
+ mutex_lock(&adev->srbm_mutex);
+ cik_srbm_select(adev, ring->me,
+ ring->pipe,
+ ring->queue, 0);
+
+ /* disable wptr polling */
+ tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
+ tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
+ WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+
+ /* enable doorbell? */
+ mqd->queue_state.cp_hqd_pq_doorbell_control =
+ RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+ if (use_doorbell)
+ mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
+ else
+ mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
+ mqd->queue_state.cp_hqd_pq_doorbell_control);
+
+ /* disable the queue if it's active */
+ mqd->queue_state.cp_hqd_dequeue_request = 0;
+ mqd->queue_state.cp_hqd_pq_rptr = 0;
+		mqd->queue_state.cp_hqd_pq_wptr = 0;
+ if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
+ WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
+ WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
+ }
+
+ /* set the pointer to the MQD */
+ mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
+ mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+ WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
+ /* set MQD vmid to 0 */
+ mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
+ mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
+ WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
+
+		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+ hqd_gpu_addr = ring->gpu_addr >> 8;
+ mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
+ mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+ WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
+ WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
+ mqd->queue_state.cp_hqd_pq_control &=
+ ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
+ CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
+
+ mqd->queue_state.cp_hqd_pq_control |=
+ order_base_2(ring->ring_size / 8);
+ mqd->queue_state.cp_hqd_pq_control |=
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
+#ifdef __BIG_ENDIAN
+ mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
+#endif
+ mqd->queue_state.cp_hqd_pq_control &=
+ ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
+ CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
+ CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
+ mqd->queue_state.cp_hqd_pq_control |=
+ CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
+ CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
+ WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
+
+		/* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 (wptr polling enabled) */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+ mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
+
+		/* set the wb address whether it's enabled or not */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
+ mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
+ upper_32_bits(wb_gpu_addr) & 0xffff;
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
+ mqd->queue_state.cp_hqd_pq_rptr_report_addr);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
+
+ /* enable the doorbell if requested */
+ if (use_doorbell) {
+ mqd->queue_state.cp_hqd_pq_doorbell_control =
+ RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+ mqd->queue_state.cp_hqd_pq_doorbell_control &=
+ ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
+ mqd->queue_state.cp_hqd_pq_doorbell_control |=
+ (ring->doorbell_index <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
+ mqd->queue_state.cp_hqd_pq_doorbell_control |=
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
+ mqd->queue_state.cp_hqd_pq_doorbell_control &=
+ ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
+
+ } else {
+ mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
+ }
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
+ mqd->queue_state.cp_hqd_pq_doorbell_control);
+
+ /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ ring->wptr = 0;
+ mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
+ WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
+ mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+
+ /* set the vmid for the queue */
+ mqd->queue_state.cp_hqd_vmid = 0;
+ WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
+
+ /* activate the queue */
+ mqd->queue_state.cp_hqd_active = 1;
+ WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
+
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ amdgpu_bo_unreserve(ring->mqd_obj);
+
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ ring->ready = false;
+ }
+
+ return 0;
+}
+
+static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+ gfx_v7_0_cp_gfx_enable(adev, enable);
+ gfx_v7_0_cp_compute_enable(adev, enable);
+}
+
+static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = gfx_v7_0_cp_gfx_load_microcode(adev);
+ if (r)
+ return r;
+ r = gfx_v7_0_cp_compute_load_microcode(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
+
+ if (enable)
+ tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
+ else
+ tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
+ WREG32(mmCP_INT_CNTL_RING0, tmp);
+}
+
+static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ gfx_v7_0_enable_gui_idle_interrupt(adev, false);
+
+ r = gfx_v7_0_cp_load_microcode(adev);
+ if (r)
+ return r;
+
+ r = gfx_v7_0_cp_gfx_resume(adev);
+ if (r)
+ return r;
+ r = gfx_v7_0_cp_compute_resume(adev);
+ if (r)
+ return r;
+
+ gfx_v7_0_enable_gui_idle_interrupt(adev, true);
+
+ return 0;
+}
+
+static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
+
+ /* instruct DE to set a magic number */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(5)));
+ amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, 1);
+
+ /* let CE wait till condition satisfied */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+ WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* == */
+ WAIT_REG_MEM_ENGINE(2))); /* ce */
+ amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, 1);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
+ /* instruct CE to reset wb of ce_sync to zero */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
+ WRITE_DATA_DST_SEL(5) |
+ WR_CONFIRM));
+ amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, 0);
+}
+
+/*
+ * vm
+ * VMID 0 is the physical GPU address space as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the amdgpu vm/hsa code.
+ */
+/**
+ * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @vm_id: VMID to flush
+ * @pd_addr: page directory base address for this VMID
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (CIK).
+ */
+static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ WRITE_DATA_DST_SEL(0)));
+ if (vm_id < 8) {
+ amdgpu_ring_write(ring,
+ (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ } else {
+ amdgpu_ring_write(ring,
+ (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ }
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* update SH_MEM_* regs */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, VMID(vm_id));
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmSH_MEM_BASES);
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, 0); /* SH_MEM_BASES */
+ amdgpu_ring_write(ring, 0); /* SH_MEM_CONFIG */
+ amdgpu_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
+ amdgpu_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, VMID(0));
+
+
+ /* bits 0-15 are the VM contexts0-15 */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1 << vm_id);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+ WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0); /* ref */
+ amdgpu_ring_write(ring, 0); /* mask */
+ amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+ /* compute doesn't have PFP */
+ if (usepfp) {
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ amdgpu_ring_write(ring, 0x0);
+
+		/* sync CE with ME to prevent the CE from fetching the CEIB before the context switch completes */
+ gfx_v7_0_ce_sync_me(ring);
+ }
+}
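+
+/*
+ * A minimal sketch of the register selection above (illustrative only;
+ * the helper name is not part of the patch): the sixteen page-table
+ * base registers live in two banks of eight.
+ */
+static u32 gfx_v7_0_vm_pt_base_reg(unsigned vm_id)
+{
+	return (vm_id < 8) ?
+		(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id) :
+		(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8);
+}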
+
+/*
+ * RLC
+ * The RLC is a multi-purpose microengine that handles a
+ * variety of functions.
+ */
+static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+ adev->gfx.rlc.save_restore_obj = NULL;
+ }
+
+ /* clear state block */
+ if (adev->gfx.rlc.clear_state_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ adev->gfx.rlc.clear_state_obj = NULL;
+ }
+
+	/* cp table block */
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
+}
+
+static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+{
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+ u32 dws, i;
+ const struct cs_section_def *cs_data;
+ int r;
+
+ /* allocate rlc buffers */
+ if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->asic_type == CHIP_KAVERI) {
+ adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
+ adev->gfx.rlc.reg_list_size =
+ (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
+ } else {
+ adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
+ adev->gfx.rlc.reg_list_size =
+ (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
+ }
+ }
+ adev->gfx.rlc.cs_data = ci_cs_data;
+ adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+
+ src_ptr = adev->gfx.rlc.reg_list;
+ dws = adev->gfx.rlc.reg_list_size;
+ dws += (5 * 16) + 48 + 48 + 64;
+
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj == NULL) {
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ /* write the sr buffer */
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+ /* clear state block */
+ adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
+
+ if (adev->gfx.rlc.clear_state_obj == NULL) {
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ }
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ gfx_v7_0_get_csb_buffer(adev, dst_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if (adev->gfx.rlc.cp_table_size) {
+ if (adev->gfx.rlc.cp_table_obj == NULL) {
+ r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+
+ gfx_v7_0_init_cp_pg_table(adev);
+
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ }
+
+ return 0;
+}
+
+static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmRLC_LB_CNTL);
+ if (enable)
+ tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
+ else
+ tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
+ WREG32(mmRLC_LB_CNTL, tmp);
+}
+
+static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+ u32 i, j, k;
+ u32 mask;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v7_0_select_se_sh(adev, i, j);
+ for (k = 0; k < adev->usec_timeout; k++) {
+ if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
+ RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
+ RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
+ RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
+ for (k = 0; k < adev->usec_timeout; k++) {
+ if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmRLC_CNTL);
+ if (tmp != rlc)
+ WREG32(mmRLC_CNTL, rlc);
+}
+
+static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_CNTL);
+
+ if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
+ u32 i;
+
+ data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
+ WREG32(mmRLC_CNTL, data);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
+ break;
+ udelay(1);
+ }
+
+ gfx_v7_0_wait_for_rlc_serdes(adev);
+ }
+
+ return orig;
+}
+
+void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ u32 tmp, i, mask;
+
+ tmp = 0x1 | (1 << 1);
+ WREG32(mmRLC_GPR_REG2, tmp);
+
+ mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
+ RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = 0x1 | (0 << 1);
+ WREG32(mmRLC_GPR_REG2, tmp);
+}
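+
+/*
+ * A hedged reading of the safe-mode handshake above (not from the
+ * patch): RLC_GPR_REG2 acts as a doorbell to the RLC firmware: bit 0
+ * raises the request and bit 1 selects enter (1) vs. exit (0); the
+ * enter path then polls until the firmware acknowledges by clearing
+ * bit 0.
+ */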
+
+/**
+ * gfx_v7_0_rlc_stop - stop the RLC ME
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Halt the RLC ME (MicroEngine) (CIK).
+ */
+void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+{
+ WREG32(mmRLC_CNTL, 0);
+
+ gfx_v7_0_enable_gui_idle_interrupt(adev, false);
+
+ gfx_v7_0_wait_for_rlc_serdes(adev);
+}
+
+/**
+ * gfx_v7_0_rlc_start - start the RLC ME
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unhalt the RLC ME (MicroEngine) (CIK).
+ */
+static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
+{
+ WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
+
+ gfx_v7_0_enable_gui_idle_interrupt(adev, true);
+
+ udelay(50);
+}
+
+static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(mmGRBM_SOFT_RESET);
+
+ tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ udelay(50);
+ tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ udelay(50);
+}
+
+/**
+ * gfx_v7_0_rlc_resume - setup the RLC hw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the RLC registers, load the ucode,
+ * and start the RLC (CIK).
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ unsigned i, fw_size;
+ u32 tmp;
+
+ if (!adev->gfx.rlc_fw)
+ return -EINVAL;
+
+ hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(&hdr->header);
+ adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ gfx_v7_0_rlc_stop(adev);
+
+ /* disable CG */
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
+
+ gfx_v7_0_rlc_reset(adev);
+
+ gfx_v7_0_init_pg(adev);
+
+ WREG32(mmRLC_LB_CNTR_INIT, 0);
+ WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
+ WREG32(mmRLC_LB_PARAMS, 0x00600408);
+ WREG32(mmRLC_LB_CNTL, 0x80000004);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32(mmRLC_MC_CNTL, 0);
+ WREG32(mmRLC_UCODE_CNTL, 0);
+
+ fw_data = (const __le32 *)
+ (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(mmRLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+
+ /* XXX - find out what chips support lbpw */
+ gfx_v7_0_enable_lbpw(adev, false);
+
+ if (adev->asic_type == CHIP_BONAIRE)
+ WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
+
+ gfx_v7_0_rlc_start(adev);
+
+ return 0;
+}
+
+static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig, tmp, tmp2;
+
+ orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
+ gfx_v7_0_enable_gui_idle_interrupt(adev, true);
+
+ tmp = gfx_v7_0_halt_rlc(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
+ RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
+ RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
+ WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v7_0_update_rlc(adev, tmp);
+
+ data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+ } else {
+ gfx_v7_0_enable_gui_idle_interrupt(adev, false);
+
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+
+ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ }
+
+ if (orig != data)
+ WREG32(mmRLC_CGCG_CGLS_CTRL, data);
+
+}
+
+static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig, tmp = 0;
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
+ if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
+ if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(mmCP_MEM_SLP_CNTL);
+ data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ if (orig != data)
+ WREG32(mmCP_MEM_SLP_CNTL, data);
+ }
+ }
+
+ orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000001;
+ data &= 0xfffffffd;
+ if (orig != data)
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
+
+ tmp = gfx_v7_0_halt_rlc(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
+ RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
+ WREG32(mmRLC_SERDES_WR_CTRL, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v7_0_update_rlc(adev, tmp);
+
+ if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
+ orig = data = RREG32(mmCGTS_SM_CTRL_REG);
+ data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
+ data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
+ data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
+ data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
+ if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
+ (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
+ data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
+ data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
+ data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
+ data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
+ if (orig != data)
+ WREG32(mmCGTS_SM_CTRL_REG, data);
+ }
+ } else {
+ orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000003;
+ if (orig != data)
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
+
+ data = RREG32(mmRLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
+ data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+ WREG32(mmRLC_MEM_SLP_CNTL, data);
+ }
+
+ data = RREG32(mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32(mmCP_MEM_SLP_CNTL, data);
+ }
+
+ orig = data = RREG32(mmCGTS_SM_CTRL_REG);
+ data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
+ if (orig != data)
+ WREG32(mmCGTS_SM_CTRL_REG, data);
+
+ tmp = gfx_v7_0_halt_rlc(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
+ WREG32(mmRLC_SERDES_WR_CTRL, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v7_0_update_rlc(adev, tmp);
+ }
+}
+
+static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
+ bool enable)
+{
+ gfx_v7_0_enable_gui_idle_interrupt(adev, false);
+ /* order matters! */
+ if (enable) {
+ gfx_v7_0_enable_mgcg(adev, true);
+ gfx_v7_0_enable_cgcg(adev, true);
+ } else {
+ gfx_v7_0_enable_cgcg(adev, false);
+ gfx_v7_0_enable_mgcg(adev, false);
+ }
+ gfx_v7_0_enable_gui_idle_interrupt(adev, true);
+}
+
+static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
+ data &= ~0x8000;
+ else
+ data |= 0x8000;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
+ data &= ~0x2000;
+ else
+ data |= 0x2000;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_KAVERI)
+ max_me = 5;
+
+ if (adev->gfx.rlc.cp_table_ptr == NULL)
+ return;
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
+static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+
+ orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
+ data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
+ if (orig != data)
+ WREG32(mmRLC_AUTO_PG_CTRL, data);
+ } else {
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+
+ orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
+ data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
+ if (orig != data)
+ WREG32(mmRLC_AUTO_PG_CTRL, data);
+
+ data = RREG32(mmDB_RENDER_CONTROL);
+ }
+}
+
+static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+ u32 se, u32 sh)
+{
+ u32 mask = 0, tmp, tmp1;
+ int i;
+
+ gfx_v7_0_select_se_sh(adev, se, sh);
+ tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+
+ tmp &= 0xffff0000;
+
+ tmp |= tmp1;
+ tmp >>= 16;
+
+ for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+
+ return (~tmp) & mask;
+}
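+
+/*
+ * Editor's note, worked example (not part of the original patch): the
+ * registers read above keep the harvested-CU bits in their upper 16 bits,
+ * so with max_cu_per_sh = 8 the loop builds mask = 0xff. If
+ * ((tmp | tmp1) >> 16) ends up as 0xc0 (CUs 6 and 7 fused off), then
+ * (~0xc0) & 0xff = 0x3f, i.e. CUs 0-5 are reported active.
+ */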
+
+static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+ uint32_t tmp, active_cu_number;
+ struct amdgpu_cu_info cu_info;
+
+ gfx_v7_0_get_cu_info(adev, &cu_info);
+ tmp = cu_info.ao_cu_mask;
+ active_cu_number = cu_info.number;
+
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);
+
+ tmp = RREG32(mmRLC_MAX_PG_CU);
+ tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
+ tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+ WREG32(mmRLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
+ data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
+ data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+ u32 i;
+
+ if (adev->gfx.rlc.cs_data) {
+ WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
+ WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
+ WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
+ } else {
+ WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ for (i = 0; i < 3; i++)
+ WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
+ }
+ if (adev->gfx.rlc.reg_list) {
+ WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
+ }
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+
+ data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
+ data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
+ data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+ WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
+
+ data = 0x10101010;
+ WREG32(mmRLC_PG_DELAY, data);
+
+ data = RREG32(mmRLC_PG_DELAY_2);
+ data &= ~0xff;
+ data |= 0x3;
+ WREG32(mmRLC_PG_DELAY_2, data);
+
+ data = RREG32(mmRLC_AUTO_PG_CTRL);
+ data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
+ data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
+ WREG32(mmRLC_AUTO_PG_CTRL, data);
+}
+
+static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+ gfx_v7_0_enable_gfx_cgpg(adev, enable);
+ gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
+ gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
+static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+ /* pa_sc_raster_config/pa_sc_raster_config1 */
+ count += 4;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
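+
+/*
+ * Editor's note (not part of the original patch): the count computed
+ * above must match, dword for dword, the packets that
+ * gfx_v7_0_get_csb_buffer() emits below: 2 for the clear-state
+ * preamble, 3 for context control, 2 + reg_count per SECT_CONTEXT
+ * extent, 4 for the two raster config registers, 2 to end the clear
+ * state and 2 for the CLEAR_STATE packet itself.
+ */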
+
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
+ volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ buffer[count++] = cpu_to_le32(0x16000012);
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_KAVERI:
+ buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_HAWAII:
+ buffer[count++] = cpu_to_le32(0x3a00161a);
+ buffer[count++] = cpu_to_le32(0x0000002e);
+ break;
+ default:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
+ AMDGPU_PG_SUPPORT_GFX_SMG |
+ AMDGPU_PG_SUPPORT_GFX_DMG |
+ AMDGPU_PG_SUPPORT_CP |
+ AMDGPU_PG_SUPPORT_GDS |
+ AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
+ gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
+ if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+ gfx_v7_0_init_gfx_cgpg(adev);
+ gfx_v7_0_enable_cp_pg(adev, true);
+ gfx_v7_0_enable_gds_pg(adev, true);
+ }
+ gfx_v7_0_init_ao_cu_mask(adev);
+ gfx_v7_0_update_gfx_pg(adev, true);
+ }
+}
+
+static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
+ AMDGPU_PG_SUPPORT_GFX_SMG |
+ AMDGPU_PG_SUPPORT_GFX_DMG |
+ AMDGPU_PG_SUPPORT_CP |
+ AMDGPU_PG_SUPPORT_GDS |
+ AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v7_0_update_gfx_pg(adev, false);
+ if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+ gfx_v7_0_enable_cp_pg(adev, false);
+ gfx_v7_0_enable_gds_pg(adev, false);
+ }
+ }
+}
+
+/**
+ * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (CIK).
+ * Returns the 64-bit clock counter snapshot.
+ */
+uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ uint64_t clock;
+
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ return clock;
+}
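+
+/*
+ * Example usage (editor's sketch, assuming a valid adev; not part of
+ * the original patch): sample the counter twice to measure elapsed GPU
+ * clocks around a workload.
+ *
+ *	uint64_t t0, t1;
+ *
+ *	t0 = gfx_v7_0_get_gpu_clock_counter(adev);
+ *	... submit work and wait for it ...
+ *	t1 = gfx_v7_0_get_gpu_clock_counter(adev);
+ *	... (t1 - t0) is the elapsed tick count ...
+ */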
+
+static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
+{
+ gds_base = gds_base >> AMDGPU_GDS_SHIFT;
+ gds_size = gds_size >> AMDGPU_GDS_SHIFT;
+
+ gws_base = gws_base >> AMDGPU_GWS_SHIFT;
+ gws_size = gws_size >> AMDGPU_GWS_SHIFT;
+
+ oa_base = oa_base >> AMDGPU_OA_SHIFT;
+ oa_size = oa_size >> AMDGPU_OA_SHIFT;
+
+ /* GDS Base */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, gds_base);
+
+ /* GDS Size */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, gds_size);
+
+ /* GWS */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
+
+ /* OA */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
+}
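+
+/*
+ * Editor's note, worked example (not part of the original patch): the
+ * OA write above builds a mask of oa_size contiguous bits starting at
+ * bit oa_base. With oa_base = 2 and oa_size = 2:
+ * (1 << (2 + 2)) - (1 << 2) = 0x10 - 0x4 = 0xc, i.e. OA slots 2 and 3.
+ */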
+
+static int gfx_v7_0_early_init(struct amdgpu_device *adev)
+{
+ adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+ gfx_v7_0_set_ring_funcs(adev);
+ gfx_v7_0_set_irq_funcs(adev);
+ gfx_v7_0_set_gds_init(adev);
+
+ return 0;
+}
+
+static int gfx_v7_0_sw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, r;
+
+ /* EOP Event */
+ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+ r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+ r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ gfx_v7_0_scratch_init(adev);
+
+ r = gfx_v7_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load gfx firmware!\n");
+ return r;
+ }
+
+ r = gfx_v7_0_rlc_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
+ /* allocate mec buffers */
+ r = gfx_v7_0_mec_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init MEC BOs!\n");
+ return r;
+ }
+
+ r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
+ if (r) {
+ DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ ring->ring_obj = NULL;
+ sprintf(ring->name, "gfx");
+ r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+ PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+ AMDGPU_RING_TYPE_GFX);
+ if (r)
+ return r;
+ }
+
+ /* set up the compute queues */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ unsigned irq_type;
+
+ /* max 32 queues per MEC */
+ if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+ DRM_ERROR("Too many (%d) compute rings!\n", i);
+ break;
+ }
+ ring = &adev->gfx.compute_ring[i];
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+ ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
+ ring->me = 1; /* first MEC */
+ ring->pipe = i / 8;
+ ring->queue = i % 8;
+ sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+ /* type-2 packets are deprecated on MEC, use type-3 instead */
+ r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+ PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_TYPE_COMPUTE);
+ if (r)
+ return r;
+ }
+
+ /* reserve GDS, GWS and OA resource for gfx */
+ r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_GDS, 0,
+ NULL, &adev->gds.gds_gfx_bo);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_GWS, 0,
+ NULL, &adev->gds.gws_gfx_bo);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_OA, 0,
+ NULL, &adev->gds.oa_gfx_bo);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gfx_v7_0_sw_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+ amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+ amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
+
+ gfx_v7_0_cp_compute_fini(adev);
+ gfx_v7_0_rlc_fini(adev);
+ gfx_v7_0_mec_fini(adev);
+
+ return 0;
+}
+
+static int gfx_v7_0_hw_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ gfx_v7_0_gpu_init(adev);
+
+ /* init rlc */
+ r = gfx_v7_0_rlc_resume(adev);
+ if (r)
+ return r;
+
+ r = gfx_v7_0_cp_resume(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gfx_v7_0_hw_fini(struct amdgpu_device *adev)
+{
+ gfx_v7_0_cp_enable(adev, false);
+ gfx_v7_0_rlc_stop(adev);
+ gfx_v7_0_fini_pg(adev);
+
+ return 0;
+}
+
+static int gfx_v7_0_suspend(struct amdgpu_device *adev)
+{
+ return gfx_v7_0_hw_fini(adev);
+}
+
+static int gfx_v7_0_resume(struct amdgpu_device *adev)
+{
+ return gfx_v7_0_hw_init(adev);
+}
+
+static bool gfx_v7_0_is_idle(struct amdgpu_device *adev)
+{
+ if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
+ return false;
+ else
+ return true;
+}
+
+static int gfx_v7_0_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* read GRBM_STATUS */
+ tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
+
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static void gfx_v7_0_print_status(struct amdgpu_device *adev)
+{
+ int i;
+
+ dev_info(adev->dev, "GFX 7.x registers\n");
+ dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(mmGRBM_STATUS));
+ dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
+ RREG32(mmGRBM_STATUS2));
+ dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE0));
+ dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE1));
+ dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE2));
+ dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
+ RREG32(mmGRBM_STATUS_SE3));
+ dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
+ dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
+ RREG32(mmCP_STALLED_STAT1));
+ dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
+ RREG32(mmCP_STALLED_STAT2));
+ dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
+ RREG32(mmCP_STALLED_STAT3));
+ dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
+ RREG32(mmCP_CPF_BUSY_STAT));
+ dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
+ RREG32(mmCP_CPF_STALLED_STAT1));
+ dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
+ dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
+ dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
+ RREG32(mmCP_CPC_STALLED_STAT1));
+ dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
+
+ for (i = 0; i < 32; i++) {
+ dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
+ i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
+ }
+ for (i = 0; i < 16; i++) {
+ dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
+ i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
+ }
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ dev_info(adev->dev, " se: %d\n", i);
+ gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
+ dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
+ RREG32(mmPA_SC_RASTER_CONFIG));
+ dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
+ RREG32(mmPA_SC_RASTER_CONFIG_1));
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+
+ dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmGB_ADDR_CONFIG));
+ dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmHDP_ADDR_CONFIG));
+ dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
+ RREG32(mmDMIF_ADDR_CALC));
+ dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n",
+ RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
+ dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n",
+ RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
+ dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
+
+ dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
+ RREG32(mmCP_MEQ_THRESHOLDS));
+ dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
+ RREG32(mmSX_DEBUG_1));
+ dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
+ RREG32(mmTA_CNTL_AUX));
+ dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
+ RREG32(mmSPI_CONFIG_CNTL));
+ dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
+ RREG32(mmSQ_CONFIG));
+ dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
+ RREG32(mmDB_DEBUG));
+ dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
+ RREG32(mmDB_DEBUG2));
+ dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
+ RREG32(mmDB_DEBUG3));
+ dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
+ RREG32(mmCB_HW_CONTROL));
+ dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
+ RREG32(mmSPI_CONFIG_CNTL_1));
+ dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
+ RREG32(mmPA_SC_FIFO_SIZE));
+ dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
+ RREG32(mmVGT_NUM_INSTANCES));
+ dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
+ RREG32(mmCP_PERFMON_CNTL));
+ dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
+ RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
+ dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
+ RREG32(mmVGT_CACHE_INVALIDATION));
+ dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
+ RREG32(mmVGT_GS_VERTEX_REUSE));
+ dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
+ RREG32(mmPA_SC_LINE_STIPPLE_STATE));
+ dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
+ RREG32(mmPA_CL_ENHANCE));
+ dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
+ RREG32(mmPA_SC_ENHANCE));
+
+ dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
+ RREG32(mmCP_ME_CNTL));
+ dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
+ RREG32(mmCP_MAX_CONTEXT));
+ dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
+ RREG32(mmCP_ENDIAN_SWAP));
+ dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
+ RREG32(mmCP_DEVICE_ID));
+
+ dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
+ RREG32(mmCP_SEM_WAIT_TIMER));
+ if (adev->asic_type != CHIP_HAWAII)
+ dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
+ RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));
+
+ dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
+ RREG32(mmCP_RB_WPTR_DELAY));
+ dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
+ RREG32(mmCP_RB_VMID));
+ dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
+ RREG32(mmCP_RB0_CNTL));
+ dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
+ RREG32(mmCP_RB0_WPTR));
+ dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
+ RREG32(mmCP_RB0_RPTR_ADDR));
+ dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
+ RREG32(mmCP_RB0_RPTR_ADDR_HI));
+ dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
+ RREG32(mmCP_RB0_CNTL));
+ dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
+ RREG32(mmCP_RB0_BASE));
+ dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
+ RREG32(mmCP_RB0_BASE_HI));
+ dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
+ RREG32(mmCP_MEC_CNTL));
+ dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
+ RREG32(mmCP_CPF_DEBUG));
+
+ dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
+ RREG32(mmSCRATCH_ADDR));
+ dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
+ RREG32(mmSCRATCH_UMSK));
+
+ /* init the pipes */
+ mutex_lock(&adev->srbm_mutex);
+ for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
+ int me = (i < 4) ? 1 : 2;
+ int pipe = (i < 4) ? i : (i - 4);
+ int queue;
+
+ dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe);
+ cik_srbm_select(adev, me, pipe, 0, 0);
+ dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n",
+ RREG32(mmCP_HPD_EOP_BASE_ADDR));
+ dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
+ RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
+ dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n",
+ RREG32(mmCP_HPD_EOP_VMID));
+ dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
+ RREG32(mmCP_HPD_EOP_CONTROL));
+
+ for (queue = 0; queue < 8; queue++) {
+ cik_srbm_select(adev, me, pipe, queue, 0);
+ dev_info(adev->dev, " queue: %d\n", queue);
+ dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
+ RREG32(mmCP_PQ_WPTR_POLL_CNTL));
+ dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
+ dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n",
+ RREG32(mmCP_HQD_ACTIVE));
+ dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
+ RREG32(mmCP_HQD_DEQUEUE_REQUEST));
+ dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_RPTR));
+ dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_WPTR));
+ dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_BASE));
+ dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_BASE_HI));
+ dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_CONTROL));
+ dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
+ dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
+ dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
+ dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
+ dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
+ dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
+ RREG32(mmCP_HQD_PQ_WPTR));
+ dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n",
+ RREG32(mmCP_HQD_VMID));
+ dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n",
+ RREG32(mmCP_MQD_BASE_ADDR));
+ dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n",
+ RREG32(mmCP_MQD_BASE_ADDR_HI));
+ dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n",
+ RREG32(mmCP_MQD_CONTROL));
+ }
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
+ RREG32(mmCP_INT_CNTL_RING0));
+ dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
+ RREG32(mmRLC_LB_CNTL));
+ dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
+ RREG32(mmRLC_CNTL));
+ dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
+ RREG32(mmRLC_CGCG_CGLS_CTRL));
+ dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
+ RREG32(mmRLC_LB_CNTR_INIT));
+ dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
+ RREG32(mmRLC_LB_CNTR_MAX));
+ dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
+ RREG32(mmRLC_LB_INIT_CU_MASK));
+ dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
+ RREG32(mmRLC_LB_PARAMS));
+ dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
+ RREG32(mmRLC_LB_CNTL));
+ dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
+ RREG32(mmRLC_MC_CNTL));
+ dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
+ RREG32(mmRLC_UCODE_CNTL));
+
+ if (adev->asic_type == CHIP_BONAIRE)
+ dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
+ RREG32(mmRLC_DRIVER_CPDMA_STATUS));
+
+ mutex_lock(&adev->srbm_mutex);
+ for (i = 0; i < 16; i++) {
+ cik_srbm_select(adev, 0, 0, 0, i);
+ dev_info(adev->dev, " VM %d:\n", i);
+ dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
+ RREG32(mmSH_MEM_CONFIG));
+ dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
+ RREG32(mmSH_MEM_APE1_BASE));
+ dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
+ RREG32(mmSH_MEM_APE1_LIMIT));
+ dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
+ RREG32(mmSH_MEM_BASES));
+ }
+ cik_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static int gfx_v7_0_soft_reset(struct amdgpu_device *adev)
+{
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(mmGRBM_STATUS);
+ if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+ GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+ GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+ GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+ GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
+ grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
+ GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
+
+ if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
+ grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
+ }
+
+ /* GRBM_STATUS2 */
+ tmp = RREG32(mmGRBM_STATUS2);
+ if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
+ grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(mmSRBM_STATUS);
+ if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
+
+ if (grbm_soft_reset || srbm_soft_reset) {
+ gfx_v7_0_print_status(adev);
+ /* disable CG/PG */
+ gfx_v7_0_fini_pg(adev);
+ gfx_v7_0_update_cg(adev, false);
+
+ /* stop the rlc */
+ gfx_v7_0_rlc_stop(adev);
+
+ /* Disable GFX parsing/prefetching */
+ WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
+
+ /* Disable MEC parsing/prefetching */
+ WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ }
+ /* Wait a little for things to settle down */
+ udelay(50);
+ gfx_v7_0_print_status(adev);
+ }
+ return 0;
+}
+
+static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+ int me, int pipe,
+ enum amdgpu_interrupt_state state)
+{
+ u32 mec_int_cntl, mec_int_cntl_reg;
+
+ /*
+ * amdgpu controls only pipe 0 of MEC1. That's why this function only
+ * handles the setting of interrupts for this specific pipe. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+
+ if (me == 1) {
+ switch (pipe) {
+ case 0:
+ mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+ } else {
+ DRM_DEBUG("invalid me %d\n", me);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CP_IRQ_GFX_EOP:
+ gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
+ gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ DRM_DEBUG("IH: CP EOP\n");
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ switch (me_id) {
+ case 0:
+ amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
+ amdgpu_fence_process(ring);
+ }
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal register access in command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in command stream\n");
+ // XXX soft reset the gfx block only
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int gfx_v7_0_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ bool gate = false;
+
+ if (state == AMDGPU_CG_STATE_GATE)
+ gate = true;
+
+ gfx_v7_0_update_cg(adev, gate);
+
+ return 0;
+}
+
+static int gfx_v7_0_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ bool gate = false;
+
+ if (state == AMDGPU_PG_STATE_GATE)
+ gate = true;
+
+ if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
+ AMDGPU_PG_SUPPORT_GFX_SMG |
+ AMDGPU_PG_SUPPORT_GFX_DMG |
+ AMDGPU_PG_SUPPORT_CP |
+ AMDGPU_PG_SUPPORT_GDS |
+ AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v7_0_update_gfx_pg(adev, gate);
+ if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+ gfx_v7_0_enable_cp_pg(adev, gate);
+ gfx_v7_0_enable_gds_pg(adev, gate);
+ }
+ }
+
+ return 0;
+}
+
+const struct amdgpu_ip_funcs gfx_v7_0_ip_funcs = {
+ .early_init = gfx_v7_0_early_init,
+ .late_init = NULL,
+ .sw_init = gfx_v7_0_sw_init,
+ .sw_fini = gfx_v7_0_sw_fini,
+ .hw_init = gfx_v7_0_hw_init,
+ .hw_fini = gfx_v7_0_hw_fini,
+ .suspend = gfx_v7_0_suspend,
+ .resume = gfx_v7_0_resume,
+ .is_idle = gfx_v7_0_is_idle,
+ .wait_for_idle = gfx_v7_0_wait_for_idle,
+ .soft_reset = gfx_v7_0_soft_reset,
+ .print_status = gfx_v7_0_print_status,
+ .set_clockgating_state = gfx_v7_0_set_clockgating_state,
+ .set_powergating_state = gfx_v7_0_set_powergating_state,
+};
+
+/**
+ * gfx_v7_0_ring_is_lockup - check if the 3D engine is locked up
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Check if the 3D engine is locked up (CIK).
+ * Returns true if the engine is locked, false if not.
+ */
+static bool gfx_v7_0_ring_is_lockup(struct amdgpu_ring *ring)
+{
+ if (gfx_v7_0_is_idle(ring->adev)) {
+ amdgpu_ring_lockup_update(ring);
+ return false;
+ }
+ return amdgpu_ring_test_lockup(ring);
+}
+
+static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+ .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
+ .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
+ .parse_cs = NULL,
+ .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
+ .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+ .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
+ .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
+ .test_ring = gfx_v7_0_ring_test_ring,
+ .test_ib = gfx_v7_0_ring_test_ib,
+ .is_lockup = gfx_v7_0_ring_is_lockup,
+};
+
+static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+ .get_rptr = gfx_v7_0_ring_get_rptr_compute,
+ .get_wptr = gfx_v7_0_ring_get_wptr_compute,
+ .set_wptr = gfx_v7_0_ring_set_wptr_compute,
+ .parse_cs = NULL,
+ .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_fence = gfx_v7_0_ring_emit_fence_compute,
+ .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+ .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
+ .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
+ .test_ring = gfx_v7_0_ring_test_ring,
+ .test_ib = gfx_v7_0_ring_test_ib,
+ .is_lockup = gfx_v7_0_ring_is_lockup,
+};
+
+static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
+ .set = gfx_v7_0_set_eop_interrupt_state,
+ .process = gfx_v7_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
+ .set = gfx_v7_0_set_priv_reg_fault_state,
+ .process = gfx_v7_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
+ .set = gfx_v7_0_set_priv_inst_fault_state,
+ .process = gfx_v7_0_priv_inst_irq,
+};
+
+static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+ adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
+
+ adev->gfx.priv_reg_irq.num_types = 1;
+ adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
+
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
+{
+ /* init asic gds info */
+ adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
+ adev->gds.gws.total_size = 64;
+ adev->gds.oa.total_size = 16;
+
+ if (adev->gds.mem.total_size == 64 * 1024) {
+ adev->gds.mem.gfx_partition_size = 4096;
+ adev->gds.mem.cs_partition_size = 4096;
+
+ adev->gds.gws.gfx_partition_size = 4;
+ adev->gds.gws.cs_partition_size = 4;
+
+ adev->gds.oa.gfx_partition_size = 4;
+ adev->gds.oa.cs_partition_size = 1;
+ } else {
+ adev->gds.mem.gfx_partition_size = 1024;
+ adev->gds.mem.cs_partition_size = 1024;
+
+ adev->gds.gws.gfx_partition_size = 16;
+ adev->gds.gws.cs_partition_size = 16;
+
+ adev->gds.oa.gfx_partition_size = 4;
+ adev->gds.oa.cs_partition_size = 4;
+ }
+}
+
+int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
+ struct amdgpu_cu_info *cu_info)
+{
+ int i, j, k, counter, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+
+ if (!adev || !cu_info)
+ return -EINVAL;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j);
+ cu_info->bitmap[i][j] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < 2)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+ active_cu_number += counter;
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ cu_info->number = active_cu_number;
+ cu_info->ao_cu_mask = ao_cu_mask;
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return 0;
+}
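+
+/*
+ * Editor's note, worked example (not part of the original patch): the
+ * ao_cu_mask packing above reserves 8 bits per shader array, 16 per
+ * shader engine. For se 0 / sh 1 an ao_bitmap of 0x3 is shifted by
+ * (0 * 16 + 1 * 8) = 8, marking CUs 0 and 1 of that array always-on
+ * via bits 8 and 9 of the final mask.
+ */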
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
new file mode 100644
index 000000000000..668b91a89e1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V7_0_H__
+#define __GFX_V7_0_H__
+
+extern const struct amdgpu_ip_funcs gfx_v7_0_ip_funcs;
+
+/* XXX these shouldn't be exported */
+void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
+void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
+void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
+uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
+void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
+int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
new file mode 100644
index 000000000000..b9b1df6bdaa4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -0,0 +1,1307 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "cikd.h"
+#include "cik.h"
+#include "gmc_v7_0.h"
+#include "amdgpu_ucode.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/boniare_mc.bin");
+MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+
+/**
+ * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle
+ * (CIK).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
+int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* read the MC busy bits of SRBM_STATUS */
+ tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
+void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 blackout;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_stop_mc_access(adev, save);
+
+ amdgpu_asic_wait_for_mc_idle(adev);
+
+ blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+ if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
+ /* Block CPU access */
+ WREG32(mmBIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout = REG_SET_FIELD(blackout,
+ MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+ WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ /* wait for the MC to settle */
+ udelay(100);
+}
+
+void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 tmp;
+
+ /* unblackout the MC */
+ tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+ WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
+ /* allow CPU access */
+ tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
+ tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
+ WREG32(mmBIF_FB_EN, tmp);
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_resume_mc_access(adev, save);
+}
+
+/**
+ * gmc_v7_0_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ chip_name = "bonaire";
+ break;
+ case CHIP_HAWAII:
+ chip_name = "hawaii";
+ break;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ return 0;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+ if (err) {
+ printk(KERN_ERR
+ "cik_mc: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
+ }
+ return err;
+}
+
+/**
+ * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Load the GDDR MC ucode into the hw (CIK).
+ * Returns 0 on success, error on failure.
+ */
+static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+ const struct mc_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data = NULL;
+ const __le32 *io_mc_regs = NULL;
+ u32 running;
+ int i, ucode_size, regs_size;
+
+ if (!adev->mc.fw)
+ return -EINVAL;
+
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+ amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+ adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ io_mc_regs = (const __le32 *)
+ (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ fw_data = (const __le32 *)
+ (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
+
+ if (running == 0) {
+
+ /* reset the engine and set to writable */
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < regs_size; i++) {
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
+ WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
+ }
+ /* load the MC ucode */
+ for (i = 0; i < ucode_size; i++)
+ WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
+
+ /* put the engine back into the active state */
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
+ MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
+ MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
+ break;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc)
+{
+ if (mc->mc_vram_size > 0xFFC0000000ULL) {
+ /* leave room for at least 1024M GTT */
+ dev_warn(adev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xFFC0000000ULL;
+ mc->mc_vram_size = 0xFFC0000000ULL;
+ }
+ amdgpu_vram_location(adev, &adev->mc, 0);
+ adev->mc.gtt_base_align = 0;
+ amdgpu_gtt_location(adev, mc);
+}
+
+/**
+ * gmc_v7_0_mc_program - program the GPU memory controller
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the location of vram, gart, and AGP in the GPU's
+ * physical address space (CIK).
+ */
+static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+ WREG32((0xb05 + j), 0x00000000);
+ WREG32((0xb06 + j), 0x00000000);
+ WREG32((0xb07 + j), 0x00000000);
+ WREG32((0xb08 + j), 0x00000000);
+ WREG32((0xb09 + j), 0x00000000);
+ }
+ WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_set_vga_render_state(adev, false);
+
+ gmc_v7_0_mc_stop(adev, &save);
+ if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Update configuration */
+ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->mc.vram_start >> 12);
+ WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->mc.vram_end >> 12);
+ WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ adev->vram_scratch.gpu_addr >> 12);
+ tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(mmMC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(mmMC_VM_AGP_BASE, 0);
+ WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
+ if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+ gmc_v7_0_mc_resume(adev, &save);
+
+ WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
+
+ tmp = RREG32(mmHDP_MISC_CNTL);
+ tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ WREG32(mmHDP_MISC_CNTL, tmp);
+
+ tmp = RREG32(mmHDP_HOST_PATH_CNTL);
+ WREG32(mmHDP_HOST_PATH_CNTL, tmp);
+}
+
+/**
+ * gmc_v7_0_mc_init - initialize the memory controller driver params
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the amount of vram, vram width, and decide how to place
+ * vram and gart within the GPU's physical address space (CIK).
+ * Returns 0 for success.
+ */
+static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
+ /* Could aperture size report 0? */
+ adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+ /* size in MB on CIK */
+ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.visible_vram_size = adev->mc.aper_size;
+
+	/* unless the user has overridden it, set the gart
+	 * size equal to 1024 MB or the vram size, whichever is larger.
+	 */
+ if (amdgpu_gart_size == -1)
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ else
+ adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+ gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+
+ return 0;
+}
+
+/*
+ * GART
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the amdgpu vm/hsa code.
+ */
+
+/**
+ * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
+ *
+ * @adev: amdgpu_device pointer
+ * @vmid: vm instance to flush
+ *
+ * Flush the TLB for the requested page table (CIK).
+ */
+static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid)
+{
+ /* flush hdp cache */
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+/**
+ * gmc_v7_0_gart_set_pte_pde - update the page tables using the CPU
+ *
+ * @adev: amdgpu_device pointer
+ * @cpu_pt_addr: cpu address of the page table
+ * @gpu_page_idx: entry in the page table to update
+ * @addr: dst addr to write into pte/pde
+ * @flags: access flags
+ *
+ * Update the page tables using the CPU.
+ */
+static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
+ void *cpu_pt_addr,
+ uint32_t gpu_page_idx,
+ uint64_t addr,
+ uint32_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
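+	/* each GART entry is 64 bits: the 4K-aligned destination address
+	 * lives in bits 63:12, the access flags in bits 11:0
+	 */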
+ value = addr & 0xFFFFFFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+
+ return 0;
+}
+
+/**
+ * gmc_v7_0_gart_enable - gart enable
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This sets up the TLBs, programs the page tables for VMID0,
+ * sets up the hw for VMIDs 1-15 which are allocated on
+ * demand, and sets up the global locations for the LDS, GDS,
+ * and GPUVM for FSA64 clients (CIK).
+ * Returns 0 for success, errors for failure.
+ */
+static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+{
+ int r, i;
+ u32 tmp;
+
+ if (adev->gart.robj == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
+ /* Setup TLB control */
+ tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
+ /* Setup L2 cache */
+ tmp = RREG32(mmVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ WREG32(mmVM_L2_CNTL, tmp);
+ tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32(mmVM_L2_CNTL2, tmp);
+ tmp = RREG32(mmVM_L2_CNTL3);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ WREG32(mmVM_L2_CNTL3, tmp);
+ /* setup context0 */
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+ WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page.addr >> 12));
+ WREG32(mmVM_CONTEXT0_CNTL2, 0);
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+
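+	/* raw dword offsets 0x575-0x577 (byte addresses 0x15D4-0x15DC as
+	 * dumped by print_status below); presumably no register defines
+	 * exist for these yet
+	 */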
+ WREG32(0x575, 0);
+ WREG32(0x576, 0);
+ WREG32(0x577, 0);
+
+ /* empty context1-15 */
+ /* FIXME start with 4G, once using 2 level pt switch to full
+ * vm size space
+ */
+ /* set vm size, must be a multiple of 4 */
+ WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+ WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn);
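+	/* the per-VMID page table base registers are split across two
+	 * banks: contexts 0-7 start at mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR,
+	 * contexts 8-15 at mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR
+	 */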
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+ adev->gart.table_addr >> 12);
+ else
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+ adev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+ WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page.addr >> 12));
+ WREG32(mmVM_CONTEXT1_CNTL2, 4);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
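+	/* the hw block size field appears to be encoded relative to the
+	 * 512-entry (2^9) minimum, hence the - 9
+	 */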
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ amdgpu_vm_block_size - 9);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
+
+ if (adev->asic_type == CHIP_KAVERI) {
+ tmp = RREG32(mmCHUB_CONTROL);
+ tmp &= ~BYPASS_VM;
+ WREG32(mmCHUB_CONTROL, tmp);
+ }
+
+ gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned long long)adev->gart.table_addr);
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gart.robj) {
+		WARN(1, "PCIE GART already initialized\n");
+ return 0;
+ }
+ /* Initialize common gart structure */
+ r = amdgpu_gart_init(adev);
+ if (r)
+ return r;
+ adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+ return amdgpu_gart_table_vram_alloc(adev);
+}
+
+/**
+ * gmc_v7_0_gart_disable - gart disable
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This disables all VM page tables (CIK).
+ */
+static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ /* Disable all tables */
+ WREG32(mmVM_CONTEXT0_CNTL, 0);
+ WREG32(mmVM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
+ /* Setup L2 cache */
+ tmp = RREG32(mmVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32(mmVM_L2_CNTL, tmp);
+ WREG32(mmVM_L2_CNTL2, 0);
+ amdgpu_gart_table_vram_unpin(adev);
+}
+
+/**
+ * gmc_v7_0_gart_fini - gart fini callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tears down the driver GART/VM setup (CIK).
+ */
+static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
+{
+ amdgpu_gart_table_vram_free(adev);
+ amdgpu_gart_fini(adev);
+}
+
+/*
+ * vm
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the amdgpu vm/hsa code.
+ */
+/**
+ * gmc_v7_0_vm_init - cik vm init callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Inits cik specific vm parameters (number of VMs, base of vram for
+ * VMIDs 1-15) (CIK).
+ * Returns 0 for success.
+ */
+static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
+{
+ /*
+ * number of VMs
+ * VMID 0 is reserved for System
+ * amdgpu graphics/compute will use VMIDs 1-7
+ * amdkfd will use VMIDs 8-15
+ */
+ adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+
+ /* base offset of vram pages */
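+	/* MC_VM_FB_OFFSET is in 4MB units, hence the << 22 below */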
+ if (adev->flags & AMDGPU_IS_APU) {
+ u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
+ tmp <<= 22;
+ adev->vm_manager.vram_base_offset = tmp;
+	} else {
+		adev->vm_manager.vram_base_offset = 0;
+	}
+
+ return 0;
+}
+
+/**
+ * gmc_v7_0_vm_fini - cik vm fini callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down any asic specific VM setup (CIK).
+ */
+static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+/**
+ * gmc_v7_0_vm_decode_fault - print human readable fault info
+ *
+ * @adev: amdgpu_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
+ *
+ * Print human readable fault information (CIK).
+ */
+static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
+ u32 status, u32 addr, u32 mc_client)
+{
+ u32 mc_id;
+ u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+ u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
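+	/* the client id is a packed 4-character tag; unpack it into a
+	 * printable string, highest byte first
+	 */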
+ char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+ (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+ mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+
+	printk(KERN_ERR "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ protections, vmid, addr,
+ REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_RW) ?
+ "write" : "read", block, mc_client, mc_id);
+}
+
+static const u32 mc_cg_registers[] = {
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+ MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+ MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+ MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+ MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+ ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+ VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+ MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+ MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+ MC_HUB_MISC_VM_CG__ENABLE_MASK,
+ MC_XPB_CLK_GAT__ENABLE_MASK,
+ ATC_MISC_CG__ENABLE_MASK,
+ MC_CITF_MISC_WR_CG__ENABLE_MASK,
+ MC_CITF_MISC_RD_CG__ENABLE_MASK,
+ MC_CITF_MISC_VM_CG__ENABLE_MASK,
+ VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ data |= mc_cg_ls_en[i];
+ else
+ data &= ~mc_cg_ls_en[i];
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ data |= mc_cg_en[i];
+ else
+ data &= ~mc_cg_en[i];
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+ } else {
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+ }
+
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+ else
+ data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+ if (orig != data)
+ WREG32(mmHDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(mmHDP_MEM_POWER_LS);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+ else
+ data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+ if (orig != data)
+ WREG32(mmHDP_MEM_POWER_LS, data);
+}
+
+static int gmc_v7_0_early_init(struct amdgpu_device *adev)
+{
+ gmc_v7_0_set_gart_funcs(adev);
+ gmc_v7_0_set_irq_funcs(adev);
+
+ if (adev->flags & AMDGPU_IS_APU) {
+ adev->mc.is_gddr5 = false;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
+ if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >>
+ MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE)
+ adev->mc.is_gddr5 = true;
+ else
+ adev->mc.is_gddr5 = false;
+ }
+
+ return 0;
+}
+
+static int gmc_v7_0_sw_init(struct amdgpu_device *adev)
+{
+ int r;
+ int dma_bits;
+
+ r = amdgpu_gem_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+ if (r)
+ return r;
+
+	/* Adjust VM size here.
+	 * Currently set to 4GB ((1 << 20) 4k pages).
+	 * Max GPUVM size for CIK is 40 bits.
+	 */
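+	/* amdgpu_vm_size is in GB; 1GB of VM space = 2^18 4K pages */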
+ adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
+ /* Set the internal MC address mask
+ * This is the max address of the GPU's
+ * internal address space.
+ */
+ adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+
+ /* set DMA mask + need_dma32 flags.
+ * PCIE - can handle 40-bits.
+ * IGP - can handle 40-bits
+ * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+ */
+ adev->need_dma32 = false;
+ dma_bits = adev->need_dma32 ? 32 : 40;
+ r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ adev->need_dma32 = true;
+ dma_bits = 32;
+ printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+ }
+ r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+ printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
+ }
+
+ r = gmc_v7_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load mc firmware!\n");
+ return r;
+ }
+
+ r = gmc_v7_0_mc_init(adev);
+ if (r)
+ return r;
+
+ /* Memory manager */
+ r = amdgpu_bo_init(adev);
+ if (r)
+ return r;
+
+ r = gmc_v7_0_gart_init(adev);
+ if (r)
+ return r;
+
+ if (!adev->vm_manager.enabled) {
+ r = gmc_v7_0_vm_init(adev);
+ if (r) {
+ dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+ adev->vm_manager.enabled = true;
+ }
+
+ return r;
+}
+
+static int gmc_v7_0_sw_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->vm_manager.enabled) {
+ for (i = 0; i < AMDGPU_NUM_VM; ++i)
+ amdgpu_fence_unref(&adev->vm_manager.active[i]);
+ gmc_v7_0_vm_fini(adev);
+ adev->vm_manager.enabled = false;
+ }
+ gmc_v7_0_gart_fini(adev);
+ amdgpu_gem_fini(adev);
+ amdgpu_bo_fini(adev);
+
+ return 0;
+}
+
+static int gmc_v7_0_hw_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ gmc_v7_0_mc_program(adev);
+
+ if (!(adev->flags & AMDGPU_IS_APU)) {
+ r = gmc_v7_0_mc_load_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+ }
+
+ r = gmc_v7_0_gart_enable(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gmc_v7_0_hw_fini(struct amdgpu_device *adev)
+{
+ gmc_v7_0_gart_disable(adev);
+
+ return 0;
+}
+
+static int gmc_v7_0_suspend(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->vm_manager.enabled) {
+ for (i = 0; i < AMDGPU_NUM_VM; ++i)
+ amdgpu_fence_unref(&adev->vm_manager.active[i]);
+ gmc_v7_0_vm_fini(adev);
+ adev->vm_manager.enabled = false;
+ }
+ gmc_v7_0_hw_fini(adev);
+
+ return 0;
+}
+
+static int gmc_v7_0_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = gmc_v7_0_hw_init(adev);
+ if (r)
+ return r;
+
+ if (!adev->vm_manager.enabled) {
+ r = gmc_v7_0_vm_init(adev);
+ if (r) {
+ dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+ adev->vm_manager.enabled = true;
+ }
+
+ return r;
+}
+
+static bool gmc_v7_0_is_idle(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+ return false;
+
+ return true;
+}
+
+static int gmc_v7_0_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+		/* read the MC busy bits from SRBM_STATUS */
+ tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+ SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK |
+ SRBM_STATUS__MCD_BUSY_MASK |
+ SRBM_STATUS__VMC_BUSY_MASK);
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static void gmc_v7_0_print_status(struct amdgpu_device *adev)
+{
+ int i, j;
+
+	dev_info(adev->dev, "GMC 7.x registers\n");
+ dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(mmSRBM_STATUS));
+ dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
+ RREG32(mmSRBM_STATUS2));
+
+ dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
+ RREG32(mmMC_VM_MX_L1_TLB_CNTL));
+ dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
+ RREG32(mmVM_L2_CNTL));
+ dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
+ RREG32(mmVM_L2_CNTL2));
+ dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
+ RREG32(mmVM_L2_CNTL3));
+ dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
+ RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
+ RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
+ RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
+ RREG32(mmVM_CONTEXT0_CNTL2));
+ dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
+ RREG32(mmVM_CONTEXT0_CNTL));
+ dev_info(adev->dev, " 0x15D4=0x%08X\n",
+ RREG32(0x575));
+ dev_info(adev->dev, " 0x15D8=0x%08X\n",
+ RREG32(0x576));
+ dev_info(adev->dev, " 0x15DC=0x%08X\n",
+ RREG32(0x577));
+ dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
+ RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
+ dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
+ RREG32(mmVM_CONTEXT1_CNTL2));
+ dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
+ RREG32(mmVM_CONTEXT1_CNTL));
+ for (i = 0; i < 16; i++) {
+ if (i < 8)
+ dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
+ i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
+ else
+ dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
+ i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
+ }
+ dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
+ RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
+ dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
+ RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
+ dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
+ RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
+ dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
+ RREG32(mmMC_VM_FB_LOCATION));
+ dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
+ RREG32(mmMC_VM_AGP_BASE));
+ dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
+ RREG32(mmMC_VM_AGP_TOP));
+ dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
+ RREG32(mmMC_VM_AGP_BOT));
+
+ if (adev->asic_type == CHIP_KAVERI) {
+ dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n",
+ RREG32(mmCHUB_CONTROL));
+ }
+
+ dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
+ RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
+ dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
+ RREG32(mmHDP_NONSURFACE_BASE));
+ dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
+ RREG32(mmHDP_NONSURFACE_INFO));
+ dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
+ RREG32(mmHDP_NONSURFACE_SIZE));
+ dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
+ RREG32(mmHDP_MISC_CNTL));
+ dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
+ RREG32(mmHDP_HOST_PATH_CNTL));
+
+ for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+ dev_info(adev->dev, " %d:\n", i);
+ dev_info(adev->dev, " 0x%04X=0x%08X\n",
+ 0xb05 + j, RREG32(0xb05 + j));
+ dev_info(adev->dev, " 0x%04X=0x%08X\n",
+ 0xb06 + j, RREG32(0xb06 + j));
+ dev_info(adev->dev, " 0x%04X=0x%08X\n",
+ 0xb07 + j, RREG32(0xb07 + j));
+ dev_info(adev->dev, " 0x%04X=0x%08X\n",
+ 0xb08 + j, RREG32(0xb08 + j));
+ dev_info(adev->dev, " 0x%04X=0x%08X\n",
+ 0xb09 + j, RREG32(0xb09 + j));
+ }
+
+ dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
+ RREG32(mmBIF_FB_EN));
+}
+
+static int gmc_v7_0_soft_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_mc_save save;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+ if (!(adev->flags & AMDGPU_IS_APU))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
+ }
+
+ if (srbm_soft_reset) {
+ gmc_v7_0_print_status(adev);
+
+ gmc_v7_0_mc_stop(adev, &save);
+ if (gmc_v7_0_wait_for_idle(adev)) {
+			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+ }
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
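+		/* read back to post the write */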
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ gmc_v7_0_mc_resume(adev, &save);
+ udelay(50);
+
+ gmc_v7_0_print_status(adev);
+ }
+
+ return 0;
+}
+
+static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp;
+ u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* system context */
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
+ tmp &= ~bits;
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+ /* VMs */
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
+ tmp &= ~bits;
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* system context */
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
+ tmp |= bits;
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+ /* VMs */
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
+ tmp |= bits;
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 addr, status, mc_client;
+
+ addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ return 0;
+}
+
+static int gmc_v7_0_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ bool gate = false;
+
+ if (state == AMDGPU_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMDGPU_IS_APU)) {
+ gmc_v7_0_enable_mc_mgcg(adev, gate);
+ gmc_v7_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v7_0_enable_bif_mgls(adev, gate);
+ gmc_v7_0_enable_hdp_mgcg(adev, gate);
+ gmc_v7_0_enable_hdp_ls(adev, gate);
+
+ return 0;
+}
+
+static int gmc_v7_0_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs gmc_v7_0_ip_funcs = {
+ .early_init = gmc_v7_0_early_init,
+ .late_init = NULL,
+ .sw_init = gmc_v7_0_sw_init,
+ .sw_fini = gmc_v7_0_sw_fini,
+ .hw_init = gmc_v7_0_hw_init,
+ .hw_fini = gmc_v7_0_hw_fini,
+ .suspend = gmc_v7_0_suspend,
+ .resume = gmc_v7_0_resume,
+ .is_idle = gmc_v7_0_is_idle,
+ .wait_for_idle = gmc_v7_0_wait_for_idle,
+ .soft_reset = gmc_v7_0_soft_reset,
+ .print_status = gmc_v7_0_print_status,
+ .set_clockgating_state = gmc_v7_0_set_clockgating_state,
+ .set_powergating_state = gmc_v7_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
+ .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
+ .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
+ .set = gmc_v7_0_vm_fault_interrupt_state,
+ .process = gmc_v7_0_process_interrupt,
+};
+
+static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+ if (adev->gart.gart_funcs == NULL)
+ adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+}
+
+static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->mc.vm_fault.num_types = 1;
+ adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
new file mode 100644
index 000000000000..ab1a2fa1afcd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V7_0_H__
+#define __GMC_V7_0_H__
+
+extern const struct amdgpu_ip_funcs gmc_v7_0_ip_funcs;
+
+/* XXX these shouldn't be exported */
+void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save);
+void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save);
+int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
new file mode 100644
index 000000000000..cd902419e6a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -0,0 +1,3336 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "cikd.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_dpm.h"
+#include "kv_dpm.h"
+#include "gfx_v7_0.h"
+#include <linux/seq_file.h>
+
+#include "smu/smu_7_0_0_d.h"
+#include "smu/smu_7_0_0_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define KV_MINIMUM_ENGINE_CLOCK 800
+#define SMC_RAM_END 0x40000
+
+static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
+static int kv_enable_nb_dpm(struct amdgpu_device *adev,
+ bool enable);
+static void kv_init_graphics_levels(struct amdgpu_device *adev);
+static int kv_calculate_ds_divider(struct amdgpu_device *adev);
+static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
+static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
+static void kv_enable_new_levels(struct amdgpu_device *adev);
+static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps);
+static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
+static int kv_set_enabled_levels(struct amdgpu_device *adev);
+static int kv_force_dpm_highest(struct amdgpu_device *adev);
+static int kv_force_dpm_lowest(struct amdgpu_device *adev);
+static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps,
+ struct amdgpu_ps *old_rps);
+static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp);
+static int kv_init_fps_limits(struct amdgpu_device *adev);
+
+static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
+
+static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_2bit)
+{
+ struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 i;
+
+ if (vddc_sclk_table && vddc_sclk_table->count) {
+ if (vid_2bit < vddc_sclk_table->count)
+ return vddc_sclk_table->entries[vid_2bit].v;
+ else
+ return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+ } else {
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+ return vid_mapping_table->entries[i].vid_7bit;
+ }
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+ }
+}
+
+static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit)
+{
+ struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 i;
+
+ if (vddc_sclk_table && vddc_sclk_table->count) {
+ for (i = 0; i < vddc_sclk_table->count; i++) {
+ if (vddc_sclk_table->entries[i].v == vid_7bit)
+ return i;
+ }
+ return vddc_sclk_table->count - 1;
+ } else {
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+ return vid_mapping_table->entries[i].vid_2bit;
+ }
+
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+ }
+}
+
+static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
+{
+/* This bit selects who handles display phy powergating.
+ * Clear the bit to let atom handle it.
+ * Set it to let the driver handle it.
+ * For now we just let atom handle it.
+ */
+#if 0
+ u32 v = RREG32(mmDOUT_SCRATCH3);
+
+ if (enable)
+ v |= 0x4;
+ else
+ v &= 0xFFFFFFFB;
+
+ WREG32(mmDOUT_SCRATCH3, v);
+#endif
+}
+
+static u32 sumo_get_sleep_divider_from_id(u32 id)
+{
+ return 1 << id;
+}
+
+static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
+ struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
+ ATOM_AVAILABLE_SCLK_LIST *table)
+{
+ u32 i;
+ u32 n = 0;
+ u32 prev_sclk = 0;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ if (table[i].ulSupportedSCLK > prev_sclk) {
+ sclk_voltage_mapping_table->entries[n].sclk_frequency =
+ table[i].ulSupportedSCLK;
+ sclk_voltage_mapping_table->entries[n].vid_2bit =
+ table[i].usVoltageIndex;
+ prev_sclk = table[i].ulSupportedSCLK;
+ n++;
+ }
+ }
+
+ sclk_voltage_mapping_table->num_max_dpm_entries = n;
+}
+
+static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ ATOM_AVAILABLE_SCLK_LIST *table)
+{
+ u32 i, j;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ if (table[i].ulSupportedSCLK != 0) {
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ table[i].usVoltageID;
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+ table[i].usVoltageIndex;
+ }
+ }
+
+ for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == 0) {
+ for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
+ if (vid_mapping_table->entries[j].vid_7bit != 0) {
+ vid_mapping_table->entries[i] =
+ vid_mapping_table->entries[j];
+ vid_mapping_table->entries[j].vid_7bit = 0;
+ break;
+ }
+ }
+
+ if (j == SUMO_MAX_NUMBER_VOLTAGES)
+ break;
+ }
+ }
+
+ vid_mapping_table->num_entries = i;
+}
+
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 2 },
+ { 4, 1, 1 },
+ { 5, 5, 2 },
+ { 6, 6, 1 },
+ { 7, 9, 2 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 1 },
+ { 4, 1, 1 },
+ { 5, 5, 1 },
+ { 6, 6, 1 },
+ { 7, 9, 1 },
+ { 8, 4, 1 },
+ { 9, 2, 1 },
+ { 10, 3, 1 },
+ { 11, 6, 1 },
+ { 12, 8, 2 },
+ { 13, 1, 1 },
+ { 14, 2, 1 },
+ { 15, 3, 1 },
+ { 16, 1, 1 },
+ { 17, 4, 1 },
+ { 18, 3, 1 },
+ { 19, 1, 1 },
+ { 20, 8, 1 },
+ { 21, 5, 1 },
+ { 22, 1, 1 },
+ { 23, 1, 1 },
+ { 24, 4, 1 },
+ { 27, 6, 1 },
+ { 28, 1, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
+{
+ { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
+{
+ { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
+{
+ { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
+{
+ { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
+{
+ { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
+{
+ { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_pt_config_reg didt_config_kv[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
+{
+ struct kv_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+#if 0
+static void kv_program_local_cac_table(struct amdgpu_device *adev,
+ const struct kv_lcac_config_values *local_cac_table,
+ const struct kv_lcac_config_reg *local_cac_reg)
+{
+ u32 i, count, data;
+ const struct kv_lcac_config_values *values = local_cac_table;
+
+ while (values->block_id != 0xffffffff) {
+ count = values->signal_id;
+ for (i = 0; i < count; i++) {
+ data = ((values->block_id << local_cac_reg->block_shift) &
+ local_cac_reg->block_mask);
+ data |= ((i << local_cac_reg->signal_shift) &
+ local_cac_reg->signal_mask);
+ data |= ((values->t << local_cac_reg->t_shift) &
+ local_cac_reg->t_mask);
+ data |= ((1 << local_cac_reg->enable_shift) &
+ local_cac_reg->enable_mask);
+ WREG32_SMC(local_cac_reg->cntl, data);
+ }
+ values++;
+ }
+}
+#endif
+
+static int kv_program_pt_config_registers(struct amdgpu_device *adev,
+ const struct kv_pt_config_reg *cac_config_regs)
+{
+ const struct kv_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
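+	/* KV_CONFIGREG_CACHE entries only accumulate their field value
+	 * into 'cache'; the accumulated bits are OR'd into the next
+	 * non-cache register write and then cleared
+	 */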
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == KV_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+ cache = 0;
+
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset, data);
+ break;
+ }
+ }
+ config_regs++;
+ }
+
+ return 0;
+}
+
+static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(ixDIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(ixDIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ else
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
+ }
+}
+
+static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ if (pi->caps_sq_ramping ||
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+ gfx_v7_0_enter_rlc_safe_mode(adev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(adev, didt_config_kv);
+ if (ret) {
+ gfx_v7_0_exit_rlc_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(adev, enable);
+
+ gfx_v7_0_exit_rlc_safe_mode(adev);
+ }
+
+ return 0;
+}
+
+#if 0
+static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ if (pi->caps_cac) {
+ WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
+
+ WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
+ WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
+ kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
+ }
+}
+#endif
+
+static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
+ if (ret)
+ pi->cac_enabled = false;
+ else
+ pi->cac_enabled = true;
+ } else if (pi->cac_enabled) {
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int kv_process_firmware_header(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->dpm_table_start = tmp;
+
+ ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->soft_regs_start = tmp;
+
+ return ret;
+}
+
+static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ pi->graphics_voltage_change_enable = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
+ &pi->graphics_voltage_change_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_interval(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ pi->graphics_interval = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
+ &pi->graphics_interval,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
+ &pi->graphics_boot_level,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static void kv_program_vc(struct amdgpu_device *adev)
+{
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
+}
+
+static void kv_clear_vc(struct amdgpu_device *adev)
+{
+ WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int kv_set_divider_value(struct amdgpu_device *adev,
+ u32 index, u32 sclk)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
+ if (ret)
+ return ret;
+
+ pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
+ pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
+
+ return 0;
+}
+
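+/* presumably the SVI VID encoding in 0.25 mV units:
+ * 6200 => 1.55 V, steps of 25 => 6.25 mV
+ */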
+static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
+ u16 voltage)
+{
+ return 6200 - (voltage * 25);
+}
+
+static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
+ u32 vid_2bit)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
+ &pi->sys_info.vid_mapping_table,
+ vid_2bit);
+
+ return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
+}
+
+static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
+ pi->graphics_level[index].MinVddNb =
+ cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
+
+ return 0;
+}
+
+static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].AT = cpu_to_be16((u16)at);
+
+ return 0;
+}
+
+static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
+}
+
+static void kv_start_dpm(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+ tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
+ WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+
+ amdgpu_kv_smc_dpm_enable(adev, true);
+}
+
+static void kv_stop_dpm(struct amdgpu_device *adev)
+{
+ amdgpu_kv_smc_dpm_enable(adev, false);
+}
+
+static void kv_start_am(struct amdgpu_device *adev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
+ SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+ sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
+
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static void kv_reset_am(struct amdgpu_device *adev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
+ SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+
+ WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, freeze ?
+ PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+}
+
+static int kv_force_lowest_valid(struct amdgpu_device *adev)
+{
+ return kv_force_dpm_lowest(adev);
+}
+
+static int kv_unforce_levels(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
+ return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
+ else
+ return kv_set_enabled_levels(adev);
+}
+
+static int kv_update_sclk_t(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 low_sclk_interrupt_t = 0;
+ int ret = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+ }
+ return ret;
+}
+
+static int kv_program_bootup_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk == pi->boot_pl.sclk)
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(adev, i, true);
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ if (table->num_max_dpm_entries == 0)
+ return -EINVAL;
+
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(adev, i, true);
+ }
+ return 0;
+}
+
+static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ pi->graphics_therm_throttle_enable = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
+ &pi->graphics_therm_throttle_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_upload_dpm_settings(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
+ (u8 *)&pi->graphics_level,
+ sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
+ pi->sram_end);
+
+ if (ret)
+ return ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
+ &pi->graphics_dpm_level_count,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static u32 kv_get_clock_difference(u32 a, u32 b)
+{
+ return (a >= b) ? a - b : b - a;
+}
+
+static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 value;
+
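+	/* clk is in 10 kHz units: pick a DFS bypass divider when the
+	 * clock is within 2 MHz of 400/300/200/150/100 MHz
+	 */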
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(clk, 40000) < 200)
+ value = 3;
+ else if (kv_get_clock_difference(clk, 30000) < 200)
+ value = 2;
+ else if (kv_get_clock_difference(clk, 20000) < 200)
+ value = 7;
+ else if (kv_get_clock_difference(clk, 15000) < 200)
+ value = 6;
+ else if (kv_get_clock_difference(clk, 10000) < 200)
+ value = 8;
+ else
+ value = 0;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static int kv_populate_uvd_table(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_uvd_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->uvd_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t < table->entries[i].v))
+ break;
+
+ pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
+ pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
+ pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
+
+ pi->uvd_level[i].VClkBypassCntl =
+ (u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
+ pi->uvd_level[i].DClkBypassCntl =
+ (u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].vclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].dclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
+
+ pi->uvd_level_count++;
+ }
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
+ (u8 *)&pi->uvd_level_count,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->uvd_interval = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UVDInterval),
+ &pi->uvd_interval,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevel),
+ (u8 *)&pi->uvd_level,
+ sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_vce_table(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+ u32 i;
+ struct amdgpu_vce_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->vce_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
+ pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->vce_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].evclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->vce_level[i].Divider = (u8)dividers.post_div;
+
+ pi->vce_level_count++;
+ }
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
+ (u8 *)&pi->vce_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->vce_interval = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VCEInterval),
+ (u8 *)&pi->vce_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevel),
+ (u8 *)&pi->vce_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_samu_table(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->samu_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->samu_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->samu_level[i].Divider = (u8)dividers.post_div;
+
+ pi->samu_level_count++;
+ }
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
+ (u8 *)&pi->samu_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->samu_interval = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
+ (u8 *)&pi->samu_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevel),
+ (u8 *)&pi->samu_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
+ pi->sram_end);
+
+	return ret;
+}
+
+static int kv_populate_acp_table(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->acp_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->acp_level[i].Divider = (u8)dividers.post_div;
+
+ pi->acp_level_count++;
+ }
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
+ (u8 *)&pi->acp_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->acp_interval = 1;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, ACPInterval),
+ (u8 *)&pi->acp_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevel),
+ (u8 *)&pi->acp_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
+ pi->sram_end);
+
+	return ret;
+}
+
+static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+			else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
+				pi->graphics_level[i].ClkBypassCntl = 6;
+			else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 6;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ }
+}
+
+static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, enable ?
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
+}
+
+static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->acp_boot_level = 0xff;
+}
+
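+/*
+ * The structure copies below duplicate the power state into pi, so
+ * ps_priv has to be re-pointed at the local kv_ps copy afterwards.
+ */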
+static void kv_update_current_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void kv_update_requested_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ if (pi->bapm_enable) {
+ ret = amdgpu_kv_smc_bapm_enable(adev, enable);
+ if (ret)
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ }
+}
+
+static int kv_dpm_enable(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
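+	/*
+	 * Bring-up order matters: the firmware header provides the SMC table
+	 * offsets (dpm_table_start etc.) used by every upload below, so it
+	 * must be parsed before any levels or tables are programmed.
+	 */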
+ ret = kv_process_firmware_header(adev);
+ if (ret) {
+ DRM_ERROR("kv_process_firmware_header failed\n");
+ return ret;
+ }
+ kv_init_fps_limits(adev);
+ kv_init_graphics_levels(adev);
+ ret = kv_program_bootup_state(adev);
+ if (ret) {
+ DRM_ERROR("kv_program_bootup_state failed\n");
+ return ret;
+ }
+ kv_calculate_dfs_bypass_settings(adev);
+ ret = kv_upload_dpm_settings(adev);
+ if (ret) {
+ DRM_ERROR("kv_upload_dpm_settings failed\n");
+ return ret;
+ }
+ ret = kv_populate_uvd_table(adev);
+ if (ret) {
+ DRM_ERROR("kv_populate_uvd_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_vce_table(adev);
+ if (ret) {
+ DRM_ERROR("kv_populate_vce_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_samu_table(adev);
+ if (ret) {
+ DRM_ERROR("kv_populate_samu_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_acp_table(adev);
+ if (ret) {
+ DRM_ERROR("kv_populate_acp_table failed\n");
+ return ret;
+ }
+ kv_program_vc(adev);
+#if 0
+ kv_initialize_hardware_cac_manager(adev);
+#endif
+ kv_start_am(adev);
+ if (pi->enable_auto_thermal_throttling) {
+ ret = kv_enable_auto_thermal_throttling(adev);
+ if (ret) {
+ DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ return ret;
+ }
+ }
+ ret = kv_enable_dpm_voltage_scaling(adev);
+ if (ret) {
+ DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_interval(adev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_interval failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_boot_state(adev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ return ret;
+ }
+ ret = kv_enable_ulv(adev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_ulv failed\n");
+ return ret;
+ }
+ kv_start_dpm(adev);
+ ret = kv_enable_didt(adev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_didt failed\n");
+ return ret;
+ }
+ ret = kv_enable_smc_cac(adev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_smc_cac failed\n");
+ return ret;
+ }
+
+ kv_reset_acp_boot_level(adev);
+
+ ret = amdgpu_kv_smc_bapm_enable(adev, false);
+ if (ret) {
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+
+ kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
+
+ if (adev->irq.installed &&
+ amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+ ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+ }
+
+ return ret;
+}
+
+static void kv_dpm_disable(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+
+ amdgpu_kv_smc_bapm_enable(adev, false);
+
+ if (adev->asic_type == CHIP_MULLINS)
+ kv_enable_nb_dpm(adev, false);
+
+ /* powerup blocks */
+ kv_dpm_powergate_acp(adev, false);
+ kv_dpm_powergate_samu(adev, false);
+ kv_dpm_powergate_vce(adev, false);
+ kv_dpm_powergate_uvd(adev, false);
+
+ kv_enable_smc_cac(adev, false);
+ kv_enable_didt(adev, false);
+ kv_clear_vc(adev);
+ kv_stop_dpm(adev);
+ kv_enable_ulv(adev, false);
+ kv_reset_am(adev);
+
+ kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
+}
+
+#if 0
+static int kv_write_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 value)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
+ (u8 *)&value, sizeof(u16), pi->sram_end);
+}
+
+static int kv_read_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 *value)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static void kv_init_sclk_t(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static int kv_init_fps_limits(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret = 0;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ pi->fps_high_t = cpu_to_be16(tmp);
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsHighT),
+ (u8 *)&pi->fps_high_t,
+					   sizeof(u16), pi->sram_end);
+		if (ret)
+			return ret;
+
+ tmp = 30;
+ pi->fps_low_t = cpu_to_be16(tmp);
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsLowT),
+ (u8 *)&pi->fps_low_t,
+ sizeof(u16), pi->sram_end);
+
+ }
+ return ret;
+}
+
+static void kv_init_powergate_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->uvd_power_gated = false;
+ pi->vce_power_gated = false;
+ pi->samu_power_gated = false;
+ pi->acp_power_gated = false;
+}
+
+static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
+}
+
+static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
+}
+
+static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
+{
+ return amdgpu_kv_notify_message_to_smu(adev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
+}
+
+static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_uvd_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ int ret;
+ u32 mask;
+
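+	/*
+	 * On ungate, pick a boot level and, unless full UVD DPM is usable
+	 * (and stable p-state is off), restrict the SMU enable mask to it.
+	 */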
+ if (!gate) {
+ if (table->count)
+ pi->uvd_boot_level = table->count - 1;
+ else
+ pi->uvd_boot_level = 0;
+
+ if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+ mask = 1 << pi->uvd_boot_level;
+ } else {
+ mask = 0x1f;
+ }
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
+ (uint8_t *)&pi->uvd_boot_level,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ mask);
+ }
+
+ return kv_enable_uvd_dpm(adev, !gate);
+}
+
+static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
+{
+ u8 i;
+ struct amdgpu_vce_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= evclk)
+ break;
+ }
+
+ return i;
+}
+
+static int kv_update_vce_dpm(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_vce_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ int ret;
+
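+	/* only act on encode start/stop transitions (evclk 0 <-> nonzero) */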
+ if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
+ kv_dpm_powergate_vce(adev, false);
+ /* turn the clocks on when encoding */
+ ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE,
+ AMDGPU_CG_STATE_UNGATE);
+ if (ret)
+ return ret;
+ if (pi->caps_stable_p_state)
+ pi->vce_boot_level = table->count - 1;
+ else
+ pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
+ (u8 *)&pi->vce_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (1 << pi->vce_boot_level));
+
+ kv_enable_vce_dpm(adev, true);
+ } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
+ kv_enable_vce_dpm(adev, false);
+ /* turn the clocks off when not encoding */
+ ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE,
+ AMDGPU_CG_STATE_GATE);
+ if (ret)
+ return ret;
+ kv_dpm_powergate_vce(adev, true);
+ }
+
+ return 0;
+}
+
+static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->samu_boot_level = table->count - 1;
+ else
+ pi->samu_boot_level = 0;
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
+ (u8 *)&pi->samu_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (1 << pi->samu_boot_level));
+ }
+
+ return kv_enable_samu_dpm(adev, !gate);
+}
+
+static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
+{
+ u8 i;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].clk >= 0) /* XXX */
+ break;
+ }
+
+ if (i >= table->count)
+ i = table->count - 1;
+
+ return i;
+}
+
+static void kv_update_acp_boot_level(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u8 acp_boot_level;
+
+ if (!pi->caps_stable_p_state) {
+ acp_boot_level = kv_get_acp_boot_level(adev);
+ if (acp_boot_level != pi->acp_boot_level) {
+ pi->acp_boot_level = acp_boot_level;
+ amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+ }
+}
+
+static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->acp_boot_level = table->count - 1;
+ else
+ pi->acp_boot_level = kv_get_acp_boot_level(adev);
+
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
+ (u8 *)&pi->acp_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+
+ return kv_enable_acp_dpm(adev, !gate);
+}
+
+static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_uvd_pg) {
+ /* disable clockgating so we can properly shut down the block */
+ ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
+ AMDGPU_CG_STATE_UNGATE);
+ /* shutdown the UVD block */
+ ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
+ AMDGPU_PG_STATE_GATE);
+ /* XXX: check for errors */
+ }
+ kv_update_uvd_dpm(adev, gate);
+ if (pi->caps_uvd_pg)
+ /* power off the UVD block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
+ } else {
+ if (pi->caps_uvd_pg) {
+ /* power on the UVD block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
+ /* re-init the UVD block */
+ ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
+ AMDGPU_PG_STATE_UNGATE);
+ /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
+ ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD,
+ AMDGPU_CG_STATE_GATE);
+ /* XXX: check for errors */
+ }
+ kv_update_uvd_dpm(adev, gate);
+ }
+}
+
+static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret;
+
+ if (pi->vce_power_gated == gate)
+ return;
+
+ pi->vce_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_vce_pg) {
+ /* shutdown the VCE block */
+ ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE,
+ AMDGPU_PG_STATE_GATE);
+ /* XXX: check for errors */
+ /* power off the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+ }
+ } else {
+ if (pi->caps_vce_pg) {
+ /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ /* re-init the VCE block */
+ ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE,
+ AMDGPU_PG_STATE_UNGATE);
+ /* XXX: check for errors */
+ }
+ }
+}
+
+static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ if (pi->samu_power_gated == gate)
+ return;
+
+ pi->samu_power_gated = gate;
+
+ if (gate) {
+ kv_update_samu_dpm(adev, true);
+ if (pi->caps_samu_pg)
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
+ } else {
+ if (pi->caps_samu_pg)
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
+ kv_update_samu_dpm(adev, false);
+ }
+}
+
+static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ if (pi->acp_power_gated == gate)
+ return;
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
+ return;
+
+ pi->acp_power_gated = gate;
+
+ if (gate) {
+ kv_update_acp_dpm(adev, true);
+ if (pi->caps_acp_pg)
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
+ } else {
+ if (pi->caps_acp_pg)
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
+ kv_update_acp_dpm(adev, false);
+ }
+}
+
+static void kv_set_valid_clock_range(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
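+	/*
+	 * Clamp the usable DPM window: lowest_valid is the first level fast
+	 * enough for the new state, highest_valid the last one not above it.
+	 * An inverted window is collapsed onto the closer boundary.
+	 */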
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
+ (i == (pi->graphics_dpm_level_count - 1))) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
+ break;
+ }
+ pi->highest_valid = i;
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
+ (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
+ if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
+ i == (int)(pi->graphics_dpm_level_count - 1)) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency <=
+ new_ps->levels[new_ps->num_levels - 1].sclk)
+ break;
+ }
+ pi->highest_valid = i;
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk -
+ table->entries[pi->highest_valid].sclk_frequency) >
+ (table->entries[pi->lowest_valid].sclk_frequency -
+			     new_ps->levels[new_ps->num_levels - 1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ }
+}
+
+static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret = 0;
+ u8 clk_bypass_cntl;
+
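+	/*
+	 * Patch only the ClkBypassCntl byte of the boot level inside the
+	 * SMC copy of the graphics level table.
+	 */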
+ if (pi->caps_enable_dfs_bypass) {
+ clk_bypass_cntl = new_ps->need_dfs_bypass ?
+ pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
+ ret = amdgpu_kv_copy_bytes_to_smc(adev,
+ (pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
+ (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
+ offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
+ &clk_bypass_cntl,
+ sizeof(u8), pi->sram_end);
+ }
+
+ return ret;
+}
+
+static int kv_enable_nb_dpm(struct amdgpu_device *adev,
+ bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ int ret = 0;
+
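+	/* track nb_dpm_enabled so the SMU message is only sent on changes */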
+ if (enable) {
+ if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+ ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = true;
+ }
+ } else {
+ if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+ ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amdgpu_dpm_forced_level level)
+{
+ int ret;
+
+ if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ ret = kv_force_dpm_highest(adev);
+ if (ret)
+ return ret;
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ ret = kv_force_dpm_lowest(adev);
+ if (ret)
+ return ret;
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ ret = kv_unforce_levels(adev);
+ if (ret)
+ return ret;
+ }
+
+ adev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
+static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+ struct amdgpu_ps *new_ps = &requested_ps;
+
+ kv_update_requested_ps(adev, new_ps);
+
+ kv_apply_state_adjust_rules(adev,
+ &pi->requested_rps,
+ &pi->current_rps);
+
+ return 0;
+}
+
+static int kv_dpm_set_power_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_ps *new_ps = &pi->requested_rps;
+ struct amdgpu_ps *old_ps = &pi->current_rps;
+ int ret;
+
+ if (pi->bapm_enable) {
+ ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
+ if (ret) {
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+ }
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(adev, new_ps);
+ kv_update_dfs_bypass_settings(adev, new_ps);
+ ret = kv_calculate_ds_divider(adev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(adev);
+ kv_calculate_dpm_settings(adev);
+ kv_force_lowest_valid(adev);
+ kv_enable_new_levels(adev);
+ kv_upload_dpm_settings(adev);
+ kv_program_nbps_index_settings(adev, new_ps);
+ kv_unforce_levels(adev);
+ kv_set_enabled_levels(adev);
+ kv_force_lowest_valid(adev);
+ kv_unforce_levels(adev);
+
+ ret = kv_update_vce_dpm(adev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+ kv_update_sclk_t(adev);
+ if (adev->asic_type == CHIP_MULLINS)
+ kv_enable_nb_dpm(adev, true);
+ }
+ } else {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(adev, new_ps);
+ kv_update_dfs_bypass_settings(adev, new_ps);
+ ret = kv_calculate_ds_divider(adev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(adev);
+ kv_calculate_dpm_settings(adev);
+ kv_freeze_sclk_dpm(adev, true);
+ kv_upload_dpm_settings(adev);
+ kv_program_nbps_index_settings(adev, new_ps);
+ kv_freeze_sclk_dpm(adev, false);
+ kv_set_enabled_levels(adev);
+ ret = kv_update_vce_dpm(adev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+ kv_update_acp_boot_level(adev);
+ kv_update_sclk_t(adev);
+ kv_enable_nb_dpm(adev, true);
+ }
+ }
+
+ return 0;
+}
+
+static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_ps *new_ps = &pi->requested_rps;
+
+ kv_update_current_ps(adev, new_ps);
+}
+
+static void kv_dpm_setup_asic(struct amdgpu_device *adev)
+{
+ sumo_take_smu_control(adev, true);
+ kv_init_powergate_state(adev);
+ kv_init_sclk_t(adev);
+}
+
+#if 0
+static void kv_dpm_reset_asic(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
+ kv_force_lowest_valid(adev);
+ kv_init_graphics_levels(adev);
+ kv_program_bootup_state(adev);
+ kv_upload_dpm_settings(adev);
+ kv_force_lowest_valid(adev);
+ kv_unforce_levels(adev);
+ } else {
+ kv_init_graphics_levels(adev);
+ kv_program_bootup_state(adev);
+ kv_freeze_sclk_dpm(adev, true);
+ kv_upload_dpm_settings(adev);
+ kv_freeze_sclk_dpm(adev, false);
+ kv_set_enabled_level(adev, pi->graphics_boot_level);
+ }
+}
+#endif
+
+static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
+ struct amdgpu_clock_and_voltage_limits *table)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
+ int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
+ table->sclk =
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
+ table->vddc =
+ kv_convert_2bit_index_to_voltage(adev,
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
+ }
+
+ table->mclk = pi->sys_info.nbp_memory_clock[0];
+}
+
+static void kv_patch_voltage_values(struct amdgpu_device *adev)
+{
+ int i;
+ struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table *samu_table =
+ &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table *acp_table =
+ &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+ if (uvd_table->count) {
+ for (i = 0; i < uvd_table->count; i++)
+ uvd_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(adev,
+ uvd_table->entries[i].v);
+ }
+
+ if (vce_table->count) {
+ for (i = 0; i < vce_table->count; i++)
+ vce_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(adev,
+ vce_table->entries[i].v);
+ }
+
+ if (samu_table->count) {
+ for (i = 0; i < samu_table->count; i++)
+ samu_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(adev,
+ samu_table->entries[i].v);
+ }
+
+ if (acp_table->count) {
+ for (i = 0; i < acp_table->count; i++)
+ acp_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(adev,
+ acp_table->entries[i].v);
+ }
+}
+
+static void kv_construct_boot_state(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+ pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+ pi->boot_pl.ds_divider_index = 0;
+ pi->boot_pl.ss_divider_index = 0;
+ pi->boot_pl.allow_gnb_slow = 1;
+ pi->boot_pl.force_nbp_state = 0;
+ pi->boot_pl.display_wm = 0;
+ pi->boot_pl.vce_wm = 0;
+}
+
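+/*
+ * Forcing works on the SCLK DPM enable mask: find the highest (or
+ * lowest) level still enabled, then either force that state directly
+ * (Kabini/Mullins) or shrink the enabled mask down to it.
+ */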
+static int kv_force_dpm_highest(struct amdgpu_device *adev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
+ return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(adev, i);
+}
+
+static int kv_force_dpm_lowest(struct amdgpu_device *adev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
+ return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(adev, i);
+}
+
+static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ u32 temp;
+ u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+
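+	/*
+	 * Choose the largest deep-sleep divider that keeps the divided sclk
+	 * at or above the self-refresh floor.
+	 */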
+ if (sclk < min)
+ return 0;
+
+ if (!pi->caps_sclk_ds)
+ return 0;
+
+ for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
+ temp = sclk / sumo_get_sleep_divider_from_id(i);
+ if (temp >= min)
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ int i;
+
+ if (table && table->count) {
+ for (i = table->count - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ }
+
+ *limit = 0;
+ return 0;
+}
+
+static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps,
+ struct amdgpu_ps *old_rps)
+{
+ struct kv_ps *ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 min_sclk = 10000; /* ??? */
+ u32 sclk, mclk = 0;
+ int i, limit;
+ bool force_high;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 stable_p_state_sclk = 0;
+ struct amdgpu_clock_and_voltage_limits *max_limits =
+ &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ if (new_rps->vce_active) {
+ new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+ new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+ } else {
+ new_rps->evclk = 0;
+ new_rps->ecclk = 0;
+ }
+
+ mclk = max_limits->mclk;
+ sclk = min_sclk;
+
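+	/*
+	 * Stable p-state pins sclk to roughly 75% of the maximum, snapped
+	 * onto an entry of the voltage dependency table.
+	 */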
+ if (pi->caps_stable_p_state) {
+ stable_p_state_sclk = (max_limits->sclk * 75) / 100;
+
+		for (i = table->count - 1; i >= 0; i--) {
+ if (stable_p_state_sclk >= table->entries[i].clk) {
+ stable_p_state_sclk = table->entries[i].clk;
+ break;
+ }
+ }
+
+ if (i > 0)
+ stable_p_state_sclk = table->entries[0].clk;
+
+ sclk = stable_p_state_sclk;
+ }
+
+ if (new_rps->vce_active) {
+ if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+ sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+ }
+
+ ps->need_dfs_bypass = true;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (ps->levels[i].sclk < sclk)
+ ps->levels[i].sclk = sclk;
+ }
+
+ if (table && table->count) {
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(adev, &limit);
+ ps->levels[i].sclk = table->entries[limit].clk;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(adev, &limit);
+ ps->levels[i].sclk = table->entries[limit].sclk_frequency;
+ }
+ }
+ }
+
+ if (pi->caps_stable_p_state) {
+ for (i = 0; i < ps->num_levels; i++) {
+ ps->levels[i].sclk = stable_p_state_sclk;
+ }
+ }
+
+ pi->video_start = new_rps->dclk || new_rps->vclk ||
+ new_rps->evclk || new_rps->ecclk;
+
+ if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x1;
+ ps->dpmx_nb_ps_hi = 0x0;
+ } else {
+ ps->dpm0_pg_nb_ps_lo = 0x3;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x3;
+ ps->dpmx_nb_ps_hi = 0x0;
+
+ if (pi->sys_info.nb_dpm_enable) {
+ force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->disable_nb_ps3_in_battery;
+ ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpm0_pg_nb_ps_hi = 0x2;
+ ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpmx_nb_ps_hi = 0x2;
+ }
+ }
+}
+
+static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
+}
+
+static int kv_calculate_ds_divider(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 sclk_in_sr = 10000; /* ??? */
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].DeepSleepDivId =
+ kv_get_sleep_divider_id_from_clock(adev,
+ be32_to_cpu(pi->graphics_level[i].SclkFrequency),
+ sclk_in_sr);
+ }
+ return 0;
+}
+
+static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ bool force_high;
+ struct amdgpu_clock_and_voltage_limits *max_limits =
+ &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ u32 mclk = max_limits->mclk;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (!pi->sys_info.nb_dpm_enable)
+ return 0;
+
+ force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+
+ if (force_high) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].GnbSlow = 0;
+ } else {
+ if (pi->battery_state)
+ pi->graphics_level[0].ForceNbPs1 = 1;
+
+ pi->graphics_level[1].GnbSlow = 0;
+ pi->graphics_level[2].GnbSlow = 0;
+ pi->graphics_level[3].GnbSlow = 0;
+ pi->graphics_level[4].GnbSlow = 0;
+ }
+ } else {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ pi->graphics_level[pi->lowest_valid].UpH = 0x28;
+ pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
+ if (pi->lowest_valid != pi->highest_valid)
+ pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
+ }
+ }
+ return 0;
+}
+
+static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
+
+ return 0;
+}
+
+static void kv_init_graphics_levels(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+ struct amdgpu_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ u32 vid_2bit;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
+ break;
+
+ kv_set_divider_value(adev, i, table->entries[i].clk);
+ vid_2bit = kv_convert_vid7_to_vid2(adev,
+ &pi->sys_info.vid_mapping_table,
+ table->entries[i].v);
+ kv_set_vid(adev, i, vid_2bit);
+ kv_set_at(adev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(adev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->num_max_dpm_entries; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t <
+ kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
+ break;
+
+ kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
+ kv_set_vid(adev, i, table->entries[i].vid_2bit);
+ kv_set_at(adev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(adev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ }
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ kv_dpm_power_level_enable(adev, i, false);
+}
+
+static void kv_enable_new_levels(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (i >= pi->lowest_valid && i <= pi->highest_valid)
+ kv_dpm_power_level_enable(adev, i, true);
+ }
+}
+
+static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
+{
+ u32 new_mask = (1 << level);
+
+ return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
+static int kv_set_enabled_levels(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 i, new_mask = 0;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ new_mask |= (1 << i);
+
+ return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
+static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 nbdpmconfig1;
+
+ if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
+ return;
+
+ if (pi->sys_info.nb_dpm_enable) {
+ nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
+ nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
+ NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
+ NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
+ NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
+ nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
+ (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
+ (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
+ (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
+ WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
+ }
+}
+
+static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
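+	/*
+	 * The thresholds are programmed in degrees with the same +49 offset
+	 * that kv_dpm_get_temp() subtracts when reading the sensor back.
+	 */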
+ tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
+ tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
+ CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
+ tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
+ ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
+ WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
+
+ adev->pm.dpm.thermal.min_temp = low_temp;
+ adev->pm.dpm.thermal.max_temp = high_temp;
+
+ return 0;
+}
+
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static int kv_parse_sys_info_table(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+ int i;
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (crev != 8) {
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ return -EINVAL;
+ }
+ pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
+ pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
+ pi->sys_info.bootup_nb_voltage_index =
+ le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
+ if (igp_info->info_8.ucHtcTmpLmt == 0)
+ pi->sys_info.htc_tmp_lmt = 203;
+ else
+ pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
+ if (igp_info->info_8.ucHtcHystLmt == 0)
+ pi->sys_info.htc_hyst_lmt = 5;
+ else
+ pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
+ if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+ DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ }
+
+ if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
+ pi->sys_info.nb_dpm_enable = true;
+ else
+ pi->sys_info.nb_dpm_enable = false;
+
+ for (i = 0; i < KV_NUM_NBPSTATES; i++) {
+ pi->sys_info.nbp_memory_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
+ pi->sys_info.nbp_n_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
+ }
+ if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
+ SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ pi->caps_enable_dfs_bypass = true;
+
+ sumo_construct_sclk_voltage_mapping_table(adev,
+ &pi->sys_info.sclk_voltage_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ sumo_construct_vid_mapping_table(adev,
+ &pi->sys_info.vid_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ kv_construct_max_power_limits_table(adev,
+ &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ }
+ return 0;
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void kv_patch_boot_state(struct amdgpu_device *adev,
+ struct kv_ps *ps)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ ps->num_levels = 1;
+ ps->levels[0] = pi->boot_pl;
+}
+
+static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ adev->pm.dpm.boot_ps = rps;
+ kv_patch_boot_state(adev, ps);
+ }
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ adev->pm.dpm.uvd_ps = rps;
+}
+
+static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct kv_ps *ps = kv_get_ps(rps);
+ struct kv_pl *pl = &ps->levels[index];
+ u32 sclk;
+
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ pl->sclk = sclk;
+ pl->vddc_index = clock_info->sumo.vddcIndex;
+
+ ps->num_levels = index + 1;
+
+ if (pi->caps_sclk_ds) {
+ pl->ds_divider_index = 5;
+ pl->ss_divider_index = 5;
+ }
+}
+
+static int kv_parse_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct kv_ps *ps;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ amdgpu_add_thermal_controller(adev);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!adev->pm.dpm.ps)
+ return -ENOMEM;
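+	/*
+	 * Each v2 power state carries one non-clock info index plus a
+	 * variable-length list of clock info indices; walk the packed state
+	 * array in place and build a kv_ps for every entry.
+	 */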
+ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(adev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.ps[i].ps_priv = ps;
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ kv_parse_pplib_clock_info(adev,
+ &adev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ u32 sclk;
+ clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ adev->pm.dpm.vce_states[i].sclk = sclk;
+ adev->pm.dpm.vce_states[i].mclk = 0;
+ }
+
+ return 0;
+}
+
+static int kv_dpm_init(struct amdgpu_device *adev)
+{
+ struct kv_power_info *pi;
+ int ret, i;
+
+ pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ adev->pm.dpm.priv = pi;
+
+ ret = amdgpu_get_platform_caps(adev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_parse_extended_power_table(adev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+ pi->at[i] = TRINITY_AT_DFLT;
+
+ pi->sram_end = SMC_RAM_END;
+
+ pi->enable_nb_dpm = true;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = true;
+ pi->enable_didt = false;
+ if (pi->enable_didt) {
+ pi->caps_sq_ramping = true;
+ pi->caps_db_ramping = true;
+ pi->caps_td_ramping = true;
+ pi->caps_tcp_ramping = true;
+ }
+
+ pi->caps_sclk_ds = true;
+ pi->enable_auto_thermal_throttling = true;
+ pi->disable_nb_ps3_in_battery = false;
+ if (amdgpu_bapm == 0)
+ pi->bapm_enable = false;
+ else
+ pi->bapm_enable = true;
+ pi->voltage_drop_t = 0;
+ pi->caps_sclk_throttle_low_notification = false;
+ pi->caps_fps = false; /* true? */
+ pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+ pi->caps_uvd_dpm = true;
+ pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+ pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false;
+ pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+ pi->caps_stable_p_state = false;
+
+ ret = kv_parse_sys_info_table(adev);
+ if (ret)
+ return ret;
+
+ kv_patch_voltage_values(adev);
+ kv_construct_boot_state(adev);
+
+ ret = kv_parse_power_table(adev);
+ if (ret)
+ return ret;
+
+ pi->enable_dpm = true;
+
+ return 0;
+}
+
+static void
+kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ u32 current_index =
+ (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+ u32 sclk, tmp;
+ u16 vddc;
+
+ if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+ tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
+ SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+ SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
+ vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+ seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
+ seq_printf(m, "power level %d sclk: %u vddc: %u\n",
+ current_index, sclk, vddc);
+ }
+}
+
+static void
+kv_dpm_print_power_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ int i;
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ amdgpu_dpm_print_class_info(rps->class, rps->class2);
+ amdgpu_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->num_levels; i++) {
+ struct kv_pl *pl = &ps->levels[i];
+ printk("\t\tpower level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+ }
+ amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static void kv_dpm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+ kfree(adev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(adev->pm.dpm.ps);
+ kfree(adev->pm.dpm.priv);
+ amdgpu_free_extended_power_table(adev);
+}
+
+static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+
+}
+
+static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+ struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->levels[0].sclk;
+ else
+ return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
+/* get temperature in millidegrees */
+static int kv_dpm_get_temp(struct amdgpu_device *adev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
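+	/* the raw reading is (temp + 49) * 8; invert and scale to millidegrees */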
+ temp = RREG32_SMC(0xC0300E0C);
+
+ if (temp)
+ actual_temp = (temp / 8) - 49;
+ else
+ actual_temp = 0;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
+
+static int kv_dpm_early_init(struct amdgpu_device *adev)
+{
+ kv_dpm_set_dpm_funcs(adev);
+ kv_dpm_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int kv_dpm_late_init(struct amdgpu_device *adev)
+{
+ if (!amdgpu_dpm)
+ return 0;
+
+ /* powerdown unused blocks for now */
+ kv_dpm_powergate_acp(adev, true);
+ kv_dpm_powergate_samu(adev, true);
+ kv_dpm_powergate_vce(adev, true);
+ kv_dpm_powergate_uvd(adev, true);
+
+ return 0;
+}
+
+static int kv_dpm_sw_init(struct amdgpu_device *adev)
+{
+ int ret;
+
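+	/*
+	 * src ids 230/231 presumably correspond to the low-to-high and
+	 * high-to-low internal thermal interrupts requested in kv_dpm_enable().
+	 */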
+ ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ /* default to balanced state */
+ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.default_sclk = adev->clock.default_sclk;
+ adev->pm.default_mclk = adev->clock.default_mclk;
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+ adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+ if (amdgpu_dpm == 0)
+ return 0;
+
+ INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+ mutex_lock(&adev->pm.mutex);
+ ret = kv_dpm_init(adev);
+ if (ret)
+ goto dpm_failed;
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ if (amdgpu_dpm == 1)
+ amdgpu_pm_print_power_states(adev);
+ ret = amdgpu_pm_sysfs_init(adev);
+ if (ret)
+ goto dpm_failed;
+ mutex_unlock(&adev->pm.mutex);
+ DRM_INFO("amdgpu: dpm initialized\n");
+
+ return 0;
+
+dpm_failed:
+ kv_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+ DRM_ERROR("amdgpu: dpm initialization failed\n");
+ return ret;
+}
+
+static int kv_dpm_sw_fini(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_pm_sysfs_fini(adev);
+ kv_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+
+ return 0;
+}
+
+static int kv_dpm_hw_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ kv_dpm_setup_asic(adev);
+ ret = kv_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+static int kv_dpm_hw_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ kv_dpm_disable(adev);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return 0;
+}
+
+static int kv_dpm_suspend(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ /* disable dpm */
+ kv_dpm_disable(adev);
+ /* reset the power state */
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
+ }
+ return 0;
+}
+
+static int kv_dpm_resume(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (adev->pm.dpm_enabled) {
+ /* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
+ kv_dpm_setup_asic(adev);
+ ret = kv_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
+ amdgpu_pm_compute_clocks(adev);
+ }
+ return 0;
+}
+
+static bool kv_dpm_is_idle(struct amdgpu_device *adev)
+{
+ /* XXX */
+ return true;
+}
+
+static int kv_dpm_wait_for_idle(struct amdgpu_device *adev)
+{
+ /* XXX */
+ return 0;
+}
+
+static void kv_dpm_print_status(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, "KV/KB DPM registers\n");
+ dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_SQ_CTRL0));
+ dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_DB_CTRL0));
+ dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_TD_CTRL0));
+ dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
+ RREG32_DIDT(ixDIDT_TCP_CTRL0));
+ dev_info(adev->dev, " LCAC_SX0_OVR_SEL=0x%08X\n",
+ RREG32_SMC(ixLCAC_SX0_OVR_SEL));
+ dev_info(adev->dev, " LCAC_SX0_OVR_VAL=0x%08X\n",
+ RREG32_SMC(ixLCAC_SX0_OVR_VAL));
+ dev_info(adev->dev, " LCAC_MC0_OVR_SEL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC0_OVR_SEL));
+ dev_info(adev->dev, " LCAC_MC0_OVR_VAL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC0_OVR_VAL));
+ dev_info(adev->dev, " LCAC_MC1_OVR_SEL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC1_OVR_SEL));
+ dev_info(adev->dev, " LCAC_MC1_OVR_VAL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC1_OVR_VAL));
+ dev_info(adev->dev, " LCAC_MC2_OVR_SEL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC2_OVR_SEL));
+ dev_info(adev->dev, " LCAC_MC2_OVR_VAL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC2_OVR_VAL));
+ dev_info(adev->dev, " LCAC_MC3_OVR_SEL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC3_OVR_SEL));
+ dev_info(adev->dev, " LCAC_MC3_OVR_VAL=0x%08X\n",
+ RREG32_SMC(ixLCAC_MC3_OVR_VAL));
+ dev_info(adev->dev, " LCAC_CPL_OVR_SEL=0x%08X\n",
+ RREG32_SMC(ixLCAC_CPL_OVR_SEL));
+ dev_info(adev->dev, " LCAC_CPL_OVR_VAL=0x%08X\n",
+ RREG32_SMC(ixLCAC_CPL_OVR_VAL));
+ dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
+ RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
+ dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
+ RREG32_SMC(ixGENERAL_PWRMGT));
+ dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
+ RREG32_SMC(ixSCLK_PWRMGT_CNTL));
+ dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
+ RREG32(mmSMC_MESSAGE_0));
+ dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
+ RREG32(mmSMC_RESP_0));
+ dev_info(adev->dev, " SMC_MSG_ARG_0=0x%08X\n",
+ RREG32(mmSMC_MSG_ARG_0));
+ dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
+ RREG32(mmSMC_IND_INDEX_0));
+ dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
+ RREG32(mmSMC_IND_DATA_0));
+ dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
+ RREG32(mmSMC_IND_ACCESS_CNTL));
+}
+
+static int kv_dpm_soft_reset(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cg_thermal_int;
+
+ switch (type) {
+ case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
+ cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
+ cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
+ cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
+ cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+ WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ bool queue_thermal = false;
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->src_id) {
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ adev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ adev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ default:
+ break;
+ }
+
+ if (queue_thermal)
+ schedule_work(&adev->pm.dpm.thermal.work);
+
+ return 0;
+}
+
+static int kv_dpm_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ return 0;
+}
+
+static int kv_dpm_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ return 0;
+}
+
+const struct amdgpu_ip_funcs kv_dpm_ip_funcs = {
+ .early_init = kv_dpm_early_init,
+ .late_init = kv_dpm_late_init,
+ .sw_init = kv_dpm_sw_init,
+ .sw_fini = kv_dpm_sw_fini,
+ .hw_init = kv_dpm_hw_init,
+ .hw_fini = kv_dpm_hw_fini,
+ .suspend = kv_dpm_suspend,
+ .resume = kv_dpm_resume,
+ .is_idle = kv_dpm_is_idle,
+ .wait_for_idle = kv_dpm_wait_for_idle,
+ .soft_reset = kv_dpm_soft_reset,
+ .print_status = kv_dpm_print_status,
+ .set_clockgating_state = kv_dpm_set_clockgating_state,
+ .set_powergating_state = kv_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+ .get_temperature = &kv_dpm_get_temp,
+ .pre_set_power_state = &kv_dpm_pre_set_power_state,
+ .set_power_state = &kv_dpm_set_power_state,
+ .post_set_power_state = &kv_dpm_post_set_power_state,
+ .display_configuration_changed = &kv_dpm_display_configuration_changed,
+ .get_sclk = &kv_dpm_get_sclk,
+ .get_mclk = &kv_dpm_get_mclk,
+ .print_power_state = &kv_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &kv_dpm_force_performance_level,
+ .powergate_uvd = &kv_dpm_powergate_uvd,
+ .enable_bapm = &kv_dpm_enable_bapm,
+};
+
+static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+ if (adev->pm.funcs == NULL)
+ adev->pm.funcs = &kv_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
+ .set = kv_dpm_set_interrupt_state,
+ .process = kv_dpm_process_interrupt,
+};
+
+static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+ adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
new file mode 100644
index 000000000000..6df0ed41317c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __KV_DPM_H__
+#define __KV_DPM_H__
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
+#include "smu7_fusion.h"
+#include "ppsmc.h"
+
+#define SUMO_MAX_HARDWARE_POWERLEVELS 5
+
+#define SUMO_MAX_NUMBER_VOLTAGES 4
+
+struct sumo_vid_mapping_entry {
+ u16 vid_2bit;
+ u16 vid_7bit;
+};
+
+struct sumo_vid_mapping_table {
+ u32 num_entries;
+ struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
+};
+
+struct sumo_sclk_voltage_mapping_entry {
+ u32 sclk_frequency;
+ u16 vid_2bit;
+ u16 rsv;
+};
+
+struct sumo_sclk_voltage_mapping_table {
+ u32 num_max_dpm_entries;
+ struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
+};
+
+#define TRINITY_AT_DFLT 30
+
+#define KV_NUM_NBPSTATES 4
+
+enum kv_pt_config_reg_type {
+ KV_CONFIGREG_MMR = 0,
+ KV_CONFIGREG_SMC_IND,
+ KV_CONFIGREG_DIDT_IND,
+ KV_CONFIGREG_CACHE,
+ KV_CONFIGREG_MAX
+};
+
+struct kv_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum kv_pt_config_reg_type type;
+};
+
+struct kv_lcac_config_values {
+ u32 block_id;
+ u32 signal_id;
+ u32 t;
+};
+
+struct kv_lcac_config_reg {
+ u32 cntl;
+ u32 block_mask;
+ u32 block_shift;
+ u32 signal_mask;
+ u32 signal_shift;
+ u32 t_mask;
+ u32 t_shift;
+ u32 enable_mask;
+ u32 enable_shift;
+};
+
+struct kv_pl {
+ u32 sclk;
+ u8 vddc_index;
+ u8 ds_divider_index;
+ u8 ss_divider_index;
+ u8 allow_gnb_slow;
+ u8 force_nbp_state;
+ u8 display_wm;
+ u8 vce_wm;
+};
+
+struct kv_ps {
+ struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 num_levels;
+ bool need_dfs_bypass;
+ u8 dpm0_pg_nb_ps_lo;
+ u8 dpm0_pg_nb_ps_hi;
+ u8 dpmx_nb_ps_lo;
+ u8 dpmx_nb_ps_hi;
+};
+
+struct kv_sys_info {
+ u32 bootup_uma_clk;
+ u32 bootup_sclk;
+ u32 dentist_vco_freq;
+ u32 nb_dpm_enable;
+ u32 nbp_memory_clock[KV_NUM_NBPSTATES];
+ u32 nbp_n_clock[KV_NUM_NBPSTATES];
+ u16 bootup_nb_voltage_index;
+ u8 htc_tmp_lmt;
+ u8 htc_hyst_lmt;
+ struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+ struct sumo_vid_mapping_table vid_mapping_table;
+ u32 uma_channel_number;
+};
+
+struct kv_power_info {
+ u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 voltage_drop_t;
+ struct kv_sys_info sys_info;
+ struct kv_pl boot_pl;
+ bool enable_nb_ps_policy;
+ bool disable_nb_ps3_in_battery;
+ bool video_start;
+ bool battery_state;
+ u32 lowest_valid;
+ u32 highest_valid;
+ u16 high_voltage_t;
+ bool cac_enabled;
+ bool bapm_enable;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ /* dpm SMU tables */
+ u8 graphics_dpm_level_count;
+ u8 uvd_level_count;
+ u8 vce_level_count;
+ u8 acp_level_count;
+ u8 samu_level_count;
+ u16 fps_high_t;
+ SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel acpi_level;
+ SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
+ u8 uvd_boot_level;
+ u8 vce_boot_level;
+ u8 acp_boot_level;
+ u8 samu_boot_level;
+ u8 uvd_interval;
+ u8 vce_interval;
+ u8 acp_interval;
+ u8 samu_interval;
+ u8 graphics_boot_level;
+ u8 graphics_interval;
+ u8 graphics_therm_throttle_enable;
+ u8 graphics_voltage_change_enable;
+ u8 graphics_clk_slow_enable;
+ u8 graphics_clk_slow_divider;
+ u8 fps_low_t;
+ u32 low_sclk_interrupt_t;
+ bool uvd_power_gated;
+ bool vce_power_gated;
+ bool acp_power_gated;
+ bool samu_power_gated;
+ bool nb_dpm_enabled;
+ /* flags */
+ bool enable_didt;
+ bool enable_dpm;
+ bool enable_auto_thermal_throttling;
+ bool enable_nb_dpm;
+ /* caps */
+ bool caps_cac;
+ bool caps_power_containment;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_fps;
+ bool caps_uvd_dpm;
+ bool caps_uvd_pg;
+ bool caps_vce_pg;
+ bool caps_samu_pg;
+ bool caps_acp_pg;
+ bool caps_stable_p_state;
+ bool caps_enable_dfs_bypass;
+ bool caps_sclk_ds;
+ struct amdgpu_ps current_rps;
+ struct kv_ps current_ps;
+ struct amdgpu_ps requested_rps;
+ struct kv_ps requested_ps;
+};
+
+/* XXX are these ok? */
+#define KV_TEMP_RANGE_MIN (90 * 1000)
+#define KV_TEMP_RANGE_MAX (120 * 1000)
+
+/* kv_smc.c */
+int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id);
+int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask);
+int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 parameter);
+int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 *value, u32 limit);
+int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable);
+int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable);
+int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
new file mode 100644
index 000000000000..e6b7b42acfe1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "cikd.h"
+#include "kv_dpm.h"
+
+#include "smu/smu_7_0_0_d.h"
+#include "smu/smu_7_0_0_sh_mask.h"
+
+int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
+{
+ u32 i;
+ u32 tmp = 0;
+
+ WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;
+
+ if (tmp != 1) {
+ if (tmp == 0xFF)
+ return -EINVAL;
+ else if (tmp == 0xFE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
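+
+/*
+ * Editorial note (not part of the original patch): the handshake above is
+ * write SMC_MESSAGE_0, poll SMC_RESP_0 until it goes non-zero, then treat a
+ * response of 1 as success.  A hedged usage sketch, reusing a message id
+ * that appears later in this file:
+ *
+ *	int ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
+ *	if (ret)
+ *		DRM_ERROR("SMU rejected DPM enable (%d)\n", ret);
+ */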
+
+int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
+{
+ int ret;
+
+ ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
+
+ if (ret == 0)
+ *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);
+
+ return ret;
+}
+
+int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(mmSMC_MSG_ARG_0, parameter);
+
+ return amdgpu_kv_notify_message_to_smu(adev, msg);
+}
+
+static int kv_set_smc_sram_address(struct amdgpu_device *adev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
+ ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+
+ return 0;
+}
+
+int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = kv_set_smc_sram_address(adev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(mmSMC_IND_DATA_0);
+ return 0;
+}
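+
+/*
+ * Editorial note (not part of the original patch): a typical caller reads a
+ * field out of the firmware header in SMC SRAM; a hedged sketch, assuming
+ * the SMU7_Firmware_Header layout from smu7.h and a kv_power_info *pi:
+ *
+ *	u32 tmp;
+ *	int ret = amdgpu_kv_read_smc_sram_dword(adev,
+ *			SMU7_FIRMWARE_HEADER_LOCATION +
+ *			offsetof(SMU7_Firmware_Header, DpmTable),
+ *			&tmp, pi->sram_end);
+ */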
+
+int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
+ else
+ return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable);
+}
+
+int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM);
+ else
+ return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM);
+}
+
+int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ int ret;
+ u32 data, original_data, addr, extra_shift, t_byte, count, mask;
+
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+ t_byte = addr & 3;
+
+ /* RMW for the initial bytes */
+ if (t_byte != 0) {
+ addr -= t_byte;
+
+ ret = kv_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(mmSMC_IND_DATA_0);
+
+ data = 0;
+ mask = 0;
+ count = 4;
+ while (count > 0) {
+ if (t_byte > 0) {
+ mask = (mask << 8) | 0xff;
+ t_byte--;
+ } else if (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ mask <<= 8;
+ } else {
+ data <<= 8;
+ mask = (mask << 8) | 0xff;
+ }
+ count--;
+ }
+
+ data |= original_data & mask;
+
+ ret = kv_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+
+ addr += 4;
+ }
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+ ret = kv_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = kv_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(mmSMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* SMC address space is BE */
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = kv_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+ }
+ return 0;
+}
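+
+/*
+ * Editorial note (not part of the original patch): SMC SRAM is accessed as
+ * big-endian 32-bit words, so unaligned head/tail bytes are merged into the
+ * existing word with a read-modify-write, while the aligned middle loop
+ * packs four source bytes per word.  A self-contained sketch of that
+ * packing (the hypothetical helper name is illustrative; OR and + are
+ * equivalent here since the byte lanes never overlap):
+ *
+ *	static u32 kv_pack_be32(const u8 *src)
+ *	{
+ *		return (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+ *	}
+ *
+ * e.g. the bytes { 0x12, 0x34, 0x56, 0x78 } pack to 0x12345678.
+ */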
+
diff --git a/drivers/gpu/drm/amd/amdgpu/smu7.h b/drivers/gpu/drm/amd/amdgpu/smu7.h
new file mode 100644
index 000000000000..75a380a15292
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu7.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_H
+#define SMU7_H
+
+#pragma pack(push, 1)
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU7_MAX_LEVELS_VDDC 8
+#define SMU7_MAX_LEVELS_VDDCI 4
+#define SMU7_MAX_LEVELS_MVDD 4
+#define SMU7_MAX_LEVELS_VDDNB 8
+
+#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
+#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
+#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
+#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
+#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
+#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
+#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
+#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
+#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
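+
+/*
+ * Editorial note (not part of the original patch): each SCRATCH_B field is a
+ * 3-bit index extracted with the usual mask-and-shift idiom, e.g.:
+ *
+ *	u32 curr_uvd = (scratch_b & SCRATCH_B_CURR_UVD_INDEX_MASK) >>
+ *		       SCRATCH_B_CURR_UVD_INDEX_SHIFT;
+ *
+ * where scratch_b is a hypothetical copy of the scratch register value.
+ */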
+
+
+struct SMU7_PIDController
+{
+ uint32_t Ki;
+ int32_t LFWindupUL;
+ int32_t LFWindupLL;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU7_PIDController SMU7_PIDController;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+struct SMU7_Firmware_Header
+{
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t Reserved[42];
+ uint32_t Signature;
+};
+
+typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/smu7_discrete.h b/drivers/gpu/drm/amd/amdgpu/smu7_discrete.h
new file mode 100644
index 000000000000..0b0b404ff091
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu7_discrete.h
@@ -0,0 +1,514 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_DISCRETE_H
+#define SMU7_DISCRETE_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 3
+#define SMU7_DTE_SINKS 1
+#define SMU7_NUM_CPU_TES 0
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Discrete_VoltageLevel
+{
+ uint16_t Voltage;
+ uint16_t StdVoltageHiSidd;
+ uint16_t StdVoltageLoSidd;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
+
+struct SMU7_Discrete_GraphicsLevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+
+ uint32_t SclkFrequency;
+
+ uint8_t padding1[2];
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t PowerThrottle;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
+
+struct SMU7_Discrete_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CgSpllFuncCntl;
+ uint32_t CgSpllFuncCntl2;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+};
+
+typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
+
+struct SMU7_Discrete_Ulv
+{
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
+
+struct SMU7_Discrete_MemoryLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t MinVddci;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t EdcReadEnable;
+ uint8_t EdcWriteEnable;
+ uint8_t RttEnable;
+ uint8_t StutterEnable;
+
+ uint8_t StrobeEnable;
+ uint8_t StrobeRatio;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t padding;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding1;
+
+ uint32_t MpllFuncCntl;
+ uint32_t MpllFuncCntl_1;
+ uint32_t MpllFuncCntl_2;
+ uint32_t MpllAdFuncCntl;
+ uint32_t MpllDqFuncCntl;
+ uint32_t MclkPwrmgtCntl;
+ uint32_t DllCntl;
+ uint32_t MpllSs1;
+ uint32_t MpllSs2;
+};
+
+typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
+
+struct SMU7_Discrete_LinkLevel
+{
+ uint8_t PcieGenSpeed;
+ uint8_t PcieLaneCount;
+ uint8_t EnabledForActivity;
+ uint8_t Padding;
+ uint32_t DownT;
+ uint32_t UpT;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
+
+
+struct SMU7_Discrete_MCArbDramTimingTableEntry
+{
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU7_Discrete_MCArbDramTimingTable
+{
+ SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
+
+struct SMU7_Discrete_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddc;
+ uint8_t MinVddcPhases;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
+
+struct SMU7_Discrete_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t MinPhases;
+ uint8_t Divider;
+};
+
+typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
+
+struct SMU7_Discrete_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+
+};
+
+typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
+
+
+struct SMU7_Discrete_DpmTable
+{
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController MemoryPIDController;
+ SMU7_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+
+ uint32_t SmioMaskVddcVid;
+ uint32_t SmioMaskVddcPhase;
+ uint32_t SmioMaskVddciVid;
+ uint32_t SmioMaskMvddVid;
+
+ uint32_t VddcLevelCount;
+ uint32_t VddciLevelCount;
+ uint32_t MvddLevelCount;
+
+ SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC];
+// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC];
+ SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI];
+ SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint8_t MasterDeepSleepControl;
+ uint32_t Reserved[5];
+// uint32_t SamuDefaultLevel;
+
+ SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
+ SMU7_Discrete_MemoryLevel MemoryACPILevel;
+ SMU7_Discrete_MemoryLevel MemoryLevel [SMU7_MAX_LEVELS_MEMORY];
+ SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK];
+ SMU7_Discrete_ACPILevel ACPILevel;
+ SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Discrete_Ulv Ulv;
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU7_MAX_ENTRIES_SMIO];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+ uint16_t VddcVddciDelta;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+
+ uint16_t FpsHighT;
+ uint16_t FpsLowT;
+
+ uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+
+ uint8_t DTEAmbientTempBase;
+ uint8_t DTETjOffset;
+ uint8_t GpuTjMax;
+ uint8_t GpuTjHyst;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t BootMVdd;
+ uint16_t padding;
+
+ uint32_t BAPM_TEMP_GRADIENT;
+
+ uint32_t LowSclkInterruptT;
+};
+
+typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
+
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
+
+struct SMU7_Discrete_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
+
+struct SMU7_Discrete_MCRegisterSet
+{
+ uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
+
+struct SMU7_Discrete_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+ SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+
+struct SMU7_Discrete_FanTable
+{
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
+
+
+struct SMU7_Discrete_PmFuses {
+ // dw0-dw1
+ uint8_t BapmVddCVidHiSidd[8];
+
+ // dw2-dw3
+ uint8_t BapmVddCVidLoSidd[8];
+
+ // dw4-dw5
+ uint8_t VddCVid[8];
+
+ // dw6
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ // dw7
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ // dw8
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ // dw9-dw10
+ uint8_t BapmVddCVidHiSidd2[8];
+
+ // dw11-dw12
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t CalcMeasPowerBlend;
+
+ // dw13-dw16
+ uint8_t GnbLPML[16];
+
+ // dw17
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ // dw18
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
+
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/smu7_fusion.h b/drivers/gpu/drm/amd/amdgpu/smu7_fusion.h
new file mode 100644
index 000000000000..78ada9ffd508
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu7_fusion.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_FUSION_H
+#define SMU7_FUSION_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+// All 'soft registers' should be uint32_t.
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Fusion_GraphicsLevel
+{
+ uint32_t MinVddNb;
+
+ uint32_t SclkFrequency;
+
+ uint8_t Vid;
+ uint8_t VidOffset;
+ uint16_t AT;
+
+ uint8_t PowerThrottle;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t SclkDid;
+
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t DeepSleepDivId;
+
+ uint8_t ClkBypassCntl;
+
+ uint32_t reserved;
+};
+
+typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
+
+struct SMU7_Fusion_GIOLevel
+{
+ uint8_t EnabledForActivity;
+ uint8_t LclkDid;
+ uint8_t Vid;
+ uint8_t VoltageDownH;
+
+ uint32_t MinVddNb;
+
+ uint16_t ResidencyCounter;
+ uint8_t UpH;
+ uint8_t DownH;
+
+ uint32_t LclkFrequency;
+
+ uint8_t ActivityLevel;
+ uint8_t EnabledForThrottle;
+
+ uint8_t ClkBypassCntl;
+
+ uint8_t padding;
+};
+
+typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU7_Fusion_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddNb;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+
+ uint8_t VClkBypassCntl;
+ uint8_t DClkBypassCntl;
+
+ uint8_t padding[2];
+
+};
+
+typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU7_Fusion_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t Divider;
+ uint8_t ClkBypassCntl;
+
+ uint32_t Reserved;
+};
+typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
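+
+/*
+ * Editorial note (not part of the original patch): these tables are shared
+ * with SMU firmware, which is why the file is wrapped in
+ * #pragma pack(push, 1).  A compile-time check of that layout assumption
+ * (illustrative only; the constant 12 follows from the fields above:
+ * 4 + 2 + 1 + 1 + 4 bytes):
+ *
+ *	BUILD_BUG_ON(sizeof(SMU7_Fusion_ExtClkLevel) != 12);
+ */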
+
+struct SMU7_Fusion_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddNb;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
+
+struct SMU7_Fusion_NbDpm
+{
+ uint8_t DpmXNbPsHi;
+ uint8_t DpmXNbPsLo;
+ uint8_t Dpm0PgNbPsHi;
+ uint8_t Dpm0PgNbPsLo;
+ uint8_t EnablePsi1;
+ uint8_t SkipDPM0;
+ uint8_t SkipPG;
+ uint8_t Hysteresis;
+ uint8_t EnableDpmPstatePoll;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
+
+struct SMU7_Fusion_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+};
+
+typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
+
+struct SMU7_Fusion_DpmTable
+{
+ uint32_t SystemFlags;
+
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController GioPIDController;
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t GIOLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint16_t FpsHighT;
+
+ SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel ACPILevel;
+ SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsInterval;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsVoltageChangeEnable;
+
+ uint8_t GraphicsClkSlowEnable;
+ uint8_t GraphicsClkSlowDivider;
+ uint16_t FpsLowT;
+
+ uint32_t DisplayCac;
+ uint32_t LowSclkInterruptT;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+
+};
+
+struct SMU7_Fusion_GIODpmTable
+{
+
+ SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+
+ SMU7_PIDController GioPIDController;
+
+ uint32_t GIOLevelCount;
+
+ uint8_t Enable;
+ uint8_t GIOVoltageChangeEnable;
+ uint8_t GIOBootLevel;
+ uint8_t padding;
+ uint8_t padding1[2];
+ uint8_t TargetState;
+ uint8_t CurrenttState;
+ uint8_t ThrottleOnHtc;
+ uint8_t ThermThrottleStatus;
+ uint8_t ThermThrottleTempSelect;
+ uint8_t ThermThrottleEnable;
+ uint16_t TemperatureLimitHigh;
+ uint16_t TemperatureLimitLow;
+
+};
+
+typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
+typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
new file mode 100644
index 000000000000..6e4cb604f928
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -0,0 +1,888 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_uvd.h"
+#include "cikd.h"
+
+#include "uvd/uvd_4_2_d.h"
+#include "uvd/uvd_4_2_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
+static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
+static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
+static int uvd_v4_2_start(struct amdgpu_device *adev);
+static void uvd_v4_2_stop(struct amdgpu_device *adev);
+
+/**
+ * uvd_v4_2_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32(mmUVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v4_2_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32(mmUVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v4_2_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+}
+
+static int uvd_v4_2_early_init(struct amdgpu_device *adev)
+{
+ uvd_v4_2_set_ring_funcs(adev);
+ uvd_v4_2_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int uvd_v4_2_sw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* UVD TRAP */
+ r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->uvd.ring;
+ sprintf(ring->name, "uvd");
+ r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+ &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+
+ return r;
+}
+
+static int uvd_v4_2_sw_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_uvd_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_sw_fini(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * uvd_v4_2_hw_init - start and test UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int uvd_v4_2_hw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->uvd.ring;
+ uint32_t tmp;
+ int r;
+
+ /* raise clocks while booting up the VCPU */
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+
+ r = uvd_v4_2_start(adev);
+ if (r)
+ goto done;
+
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
+ r = amdgpu_ring_lock(ring, 10);
+ if (r) {
+ DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
+ goto done;
+ }
+
+ tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
+ amdgpu_ring_write(ring, 3);
+
+ amdgpu_ring_unlock_commit(ring);
+
+done:
+ /* lower clocks again */
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ return r;
+}
+
+/**
+ * uvd_v4_2_hw_fini - stop the hardware block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the UVD block, mark ring as not ready any more
+ */
+static int uvd_v4_2_hw_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->uvd.ring;
+
+ uvd_v4_2_stop(adev);
+ ring->ready = false;
+
+ return 0;
+}
+
+static int uvd_v4_2_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = uvd_v4_2_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_suspend(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int uvd_v4_2_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
+ r = uvd_v4_2_hw_init(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * uvd_v4_2_start - start UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the UVD block
+ */
+static int uvd_v4_2_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->uvd.ring;
+ uint32_t rb_bufsz;
+ int i, j, r;
+
+ /* disable byte swapping */
+ u32 lmi_swap_cntl = 0;
+ u32 mp_swap_cntl = 0;
+
+ uvd_v4_2_mc_resume(adev);
+
+ /* disable clock gating */
+ WREG32(mmUVD_CGC_GATE, 0);
+
+	/* disable interrupt */
+ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ mdelay(1);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+ mp_swap_cntl = 0;
+#endif
+ WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+ WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+ WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
+ WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
+ WREG32(mmUVD_MPC_SET_ALU, 0);
+ WREG32(mmUVD_MPC_SET_MUX, 0x88);
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(mmUVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ return r;
+ }
+
+	/* enable interrupt */
+ WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ /* force RBC into idle state */
+ WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+	/* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(mmUVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
+ WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
+ rb_bufsz = order_base_2(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
+
+ return 0;
+}
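+
+/*
+ * Editorial note (not part of the original patch): the ring buffer size is
+ * programmed as a log2 value, so the computation above reduces to, for a
+ * hypothetical 64 KiB ring:
+ *
+ *	rb_bufsz = order_base_2(0x10000);	// = 16
+ *	rb_bufsz = (0x1 << 8) | rb_bufsz;	// = 0x110
+ *
+ * which WREG32_P() then merges into UVD_RBC_RB_CNTL under the ~0x11f1f mask.
+ */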
+
+/**
+ * uvd_v4_2_stop - stop UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the UVD block
+ */
+static void uvd_v4_2_stop(struct amdgpu_device *adev)
+{
+ /* force RBC into idle state */
+ WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(mmUVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+}
+
+/**
+ * uvd_v4_2_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address to write the fence sequence number to
+ * @seq: fence sequence number to write
+ * @write64bit: true for a 64-bit fence write (not supported here)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ bool write64bit)
+{
+ WARN_ON(write64bit);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, addr & 0xffffffff);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v4_2_ring_emit_semaphore - emit semaphore command
+ *
+ * @ring: amdgpu_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring,
+ struct amdgpu_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
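+	/* the semaphore address is programmed in 8-byte units, split
+	 * across two 20-bit register fields
+	 */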
+ amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
+ amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
+ amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
+ amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+ return true;
+}
+
+/**
+ * uvd_v4_2_ring_test_ring - register write test
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
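+	/* seed the scratch register, then ask the ring to overwrite it */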
+ WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = amdgpu_ring_lock(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_unlock_commit(ring);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(mmUVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * uvd_v4_2_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
+ amdgpu_ring_write(ring, ib->gpu_addr);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v4_2_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_fence *fence = NULL;
+ int r;
+
+ r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = amdgpu_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ amdgpu_fence_unref(&fence);
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ return r;
+}
+
+/**
+ * uvd_v4_2_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
+{
+ uint64_t addr;
+ uint32_t size;
+
+	/* program the VCPU memory controller bits 0-27 */
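+	/* offsets and sizes below are programmed in 8-byte units, hence the >> 3 */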
+ addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
+ WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = AMDGPU_UVD_STACK_SIZE >> 3;
+ WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = AMDGPU_UVD_HEAP_SIZE >> 3;
+ WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (adev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ uvd_v4_2_init_cg(adev);
+}
+
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) {
+		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+		data |= 0xfff;
+		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ }
+}
+
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+ bool sw_mode)
+{
+ u32 tmp, tmp2;
+
+ tmp = RREG32(mmUVD_CGC_CTRL);
+ tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+ tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
+ (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
+ (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);
+
+ if (sw_mode) {
+ tmp &= ~0x7ffff800;
+ tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
+ UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
+ (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
+ } else {
+ tmp |= 0x7ffff800;
+ tmp2 = 0;
+ }
+
+ WREG32(mmUVD_CGC_CTRL, tmp);
+ WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
+}
+
+static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
+{
+ bool hw_mode = true;
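+	/* always use hardware-controlled dynamic clock gating for now;
+	 * the sw branch below is presumably only kept for bringup/debug
+	 */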
+
+ if (hw_mode) {
+ uvd_v4_2_set_dcm(adev, false);
+ } else {
+ u32 tmp = RREG32(mmUVD_CGC_CTRL);
+ tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ WREG32(mmUVD_CGC_CTRL, tmp);
+ }
+}
+
+static bool uvd_v4_2_is_idle(struct amdgpu_device *adev)
+{
+ return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
+}
+
+static int uvd_v4_2_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
+static int uvd_v4_2_soft_reset(struct amdgpu_device *adev)
+{
+ uvd_v4_2_stop(adev);
+
+ WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
+ ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+ mdelay(5);
+
+ return uvd_v4_2_start(adev);
+}
+
+static void uvd_v4_2_print_status(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, "UVD 4.2 registers\n");
+ dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
+ RREG32(mmUVD_SEMA_ADDR_LOW));
+ dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
+ RREG32(mmUVD_SEMA_ADDR_HIGH));
+ dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
+ RREG32(mmUVD_SEMA_CMD));
+ dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
+ RREG32(mmUVD_GPCOM_VCPU_CMD));
+ dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
+ RREG32(mmUVD_GPCOM_VCPU_DATA0));
+ dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
+ RREG32(mmUVD_GPCOM_VCPU_DATA1));
+ dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
+ RREG32(mmUVD_ENGINE_CNTL));
+ dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
+ RREG32(mmUVD_SEMA_CNTL));
+ dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
+ RREG32(mmUVD_LMI_EXT40_ADDR));
+ dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
+ RREG32(mmUVD_CTX_INDEX));
+ dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
+ RREG32(mmUVD_CTX_DATA));
+ dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
+ RREG32(mmUVD_CGC_GATE));
+ dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
+ RREG32(mmUVD_CGC_CTRL));
+ dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
+ RREG32(mmUVD_LMI_CTRL2));
+ dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
+ RREG32(mmUVD_MASTINT_EN));
+ dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
+ RREG32(mmUVD_LMI_ADDR_EXT));
+ dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
+ RREG32(mmUVD_LMI_CTRL));
+ dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
+ RREG32(mmUVD_LMI_SWAP_CNTL));
+ dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
+ RREG32(mmUVD_MP_SWAP_CNTL));
+ dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
+ RREG32(mmUVD_MPC_SET_MUXA0));
+ dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
+ RREG32(mmUVD_MPC_SET_MUXA1));
+ dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
+ RREG32(mmUVD_MPC_SET_MUXB0));
+ dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
+ RREG32(mmUVD_MPC_SET_MUXB1));
+ dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
+ RREG32(mmUVD_MPC_SET_MUX));
+ dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
+ RREG32(mmUVD_MPC_SET_ALU));
+ dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
+ RREG32(mmUVD_VCPU_CACHE_OFFSET0));
+ dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
+ RREG32(mmUVD_VCPU_CACHE_SIZE0));
+ dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
+ RREG32(mmUVD_VCPU_CACHE_OFFSET1));
+ dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
+ RREG32(mmUVD_VCPU_CACHE_SIZE1));
+ dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
+ RREG32(mmUVD_VCPU_CACHE_OFFSET2));
+ dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
+ RREG32(mmUVD_VCPU_CACHE_SIZE2));
+ dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
+ RREG32(mmUVD_VCPU_CNTL));
+ dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
+ RREG32(mmUVD_SOFT_RESET));
+ dev_info(adev->dev, " UVD_RBC_IB_BASE=0x%08X\n",
+ RREG32(mmUVD_RBC_IB_BASE));
+ dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
+ RREG32(mmUVD_RBC_IB_SIZE));
+ dev_info(adev->dev, " UVD_RBC_RB_BASE=0x%08X\n",
+ RREG32(mmUVD_RBC_RB_BASE));
+ dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
+ RREG32(mmUVD_RBC_RB_RPTR));
+ dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
+ RREG32(mmUVD_RBC_RB_WPTR));
+ dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
+ RREG32(mmUVD_RBC_RB_WPTR_CNTL));
+ dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
+ RREG32(mmUVD_RBC_RB_CNTL));
+ dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
+ RREG32(mmUVD_STATUS));
+ dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
+ RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
+ dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
+ RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
+ dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
+ RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
+ dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
+ RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
+ dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
+ RREG32(mmUVD_CONTEXT_ID));
+}
+
+static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+	/* TODO */
+ return 0;
+}
+
+static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: UVD TRAP\n");
+ amdgpu_fence_process(&adev->uvd.ring);
+ return 0;
+}
+
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ bool gate = false;
+
+ if (state == AMDGPU_CG_STATE_GATE)
+ gate = true;
+
+ uvd_v4_2_enable_mgcg(adev, gate);
+
+ return 0;
+}
+
+static int uvd_v4_2_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ /* This doesn't actually powergate the UVD block.
+ * That's done in the dpm code via the SMC. This
+ * just re-inits the block as necessary. The actual
+ * gating still happens in the dpm code. We should
+ * revisit this when there is a cleaner line between
+ * the smc and the hw blocks
+ */
+ if (state == AMDGPU_PG_STATE_GATE) {
+ uvd_v4_2_stop(adev);
+ return 0;
+ } else {
+ return uvd_v4_2_start(adev);
+ }
+}
+
+const struct amdgpu_ip_funcs uvd_v4_2_ip_funcs = {
+ .early_init = uvd_v4_2_early_init,
+ .late_init = NULL,
+ .sw_init = uvd_v4_2_sw_init,
+ .sw_fini = uvd_v4_2_sw_fini,
+ .hw_init = uvd_v4_2_hw_init,
+ .hw_fini = uvd_v4_2_hw_fini,
+ .suspend = uvd_v4_2_suspend,
+ .resume = uvd_v4_2_resume,
+ .is_idle = uvd_v4_2_is_idle,
+ .wait_for_idle = uvd_v4_2_wait_for_idle,
+ .soft_reset = uvd_v4_2_soft_reset,
+ .print_status = uvd_v4_2_print_status,
+ .set_clockgating_state = uvd_v4_2_set_clockgating_state,
+ .set_powergating_state = uvd_v4_2_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .get_rptr = uvd_v4_2_ring_get_rptr,
+ .get_wptr = uvd_v4_2_ring_get_wptr,
+ .set_wptr = uvd_v4_2_ring_set_wptr,
+ .parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_ib = uvd_v4_2_ring_emit_ib,
+ .emit_fence = uvd_v4_2_ring_emit_fence,
+ .emit_semaphore = uvd_v4_2_ring_emit_semaphore,
+ .test_ring = uvd_v4_2_ring_test_ring,
+ .test_ib = uvd_v4_2_ring_test_ib,
+ .is_lockup = amdgpu_ring_test_lockup,
+};
+
+static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
+ .set = uvd_v4_2_set_interrupt_state,
+ .process = uvd_v4_2_process_interrupt,
+};
+
+static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->uvd.irq.num_types = 1;
+ adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
new file mode 100644
index 000000000000..323a6d828dfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __UVD_V4_2_H__
+#define __UVD_V4_2_H__
+
+extern const struct amdgpu_ip_funcs uvd_v4_2_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
new file mode 100644
index 000000000000..b47c16da6bf8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "cikd.h"
+
+#include "vce/vce_2_0_d.h"
+#include "vce/vce_2_0_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
+static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
+
+/**
+ * vce_v2_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->vce.ring[0])
+ return RREG32(mmVCE_RB_RPTR);
+ else
+ return RREG32(mmVCE_RB_RPTR2);
+}
+
+/**
+ * vce_v2_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->vce.ring[0])
+ return RREG32(mmVCE_RB_WPTR);
+ else
+ return RREG32(mmVCE_RB_WPTR2);
+}
+
+/**
+ * vce_v2_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->vce.ring[0])
+ WREG32(mmVCE_RB_WPTR, ring->wptr);
+ else
+ WREG32(mmVCE_RB_WPTR2, ring->wptr);
+}
+
+/**
+ * vce_v2_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v2_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, j, r;
+
+ vce_v2_0_mc_resume(adev);
+
+ /* set BUSY flag */
+ WREG32_P(mmVCE_STATUS, 1, ~1);
+
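+	/* VCE 2.0 exposes two rings; program both ring buffers */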
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, ring->wptr);
+ WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, ring->wptr);
+ WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+ mdelay(100);
+
+ WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
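+	/*
+	 * Poll VCE_STATUS for up to a second per attempt and, as with
+	 * UVD, retry the ECPU reset up to ten times before giving up.
+	 */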
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmVCE_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ r = -1;
+ }
+
+ /* clear BUSY flag */
+ WREG32_P(mmVCE_STATUS, 0, ~1);
+
+ if (r) {
+ DRM_ERROR("VCE not responding, giving up!!!\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int vce_v2_0_early_init(struct amdgpu_device *adev)
+{
+ vce_v2_0_set_ring_funcs(adev);
+ vce_v2_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int vce_v2_0_sw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+	/* VCE trap interrupt */
+ r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->vce.ring[0];
+ sprintf(ring->name, "vce0");
+ r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+ &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ if (r)
+ return r;
+
+ ring = &adev->vce.ring[1];
+ sprintf(ring->name, "vce1");
+ r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+ &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int vce_v2_0_sw_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_vce_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_sw_fini(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int vce_v2_0_hw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ r = vce_v2_0_start(adev);
+ if (r)
+ return r;
+
+ ring = &adev->vce.ring[0];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ ring = &adev->vce.ring[1];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ DRM_INFO("VCE initialized successfully.\n");
+
+ return 0;
+}
+
+static int vce_v2_0_hw_fini(struct amdgpu_device *adev)
+{
+	/* TODO */
+ return 0;
+}
+
+static int vce_v2_0_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = vce_v2_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_suspend(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int vce_v2_0_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+
+ r = vce_v2_0_hw_init(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
+{
+ u32 tmp;
+
+ if (gated) {
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0xe70000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0xff000000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3fc;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+ } else {
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0xe7;
+ tmp &= ~0xe70000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0x1fe000;
+ tmp &= ~0xff000000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3fc;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
+static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
+{
+ u32 orig, tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp &= ~0x00060006;
+ if (gated) {
+ tmp |= 0xe10000;
+ } else {
+ tmp |= 0xe1;
+ tmp &= ~0xe10000;
+ }
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1fe000;
+ tmp &= ~0xff000000;
+ if (tmp != orig)
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3fc;
+ if (tmp != orig)
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+ if (gated)
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+}
+
+static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
+{
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ bool sw_cg = false;
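+	/* dynamic gating is the default; the coarse sw-controlled path
+	 * is presumably only kept for bringup/debug
+	 */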
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
+ if (sw_cg)
+ vce_v2_0_set_sw_cg(adev, true);
+ else
+ vce_v2_0_set_dyn_cg(adev, true);
+ } else {
+ vce_v2_0_disable_cg(adev);
+
+ if (sw_cg)
+ vce_v2_0_set_sw_cg(adev, false);
+ else
+ vce_v2_0_set_dyn_cg(adev, false);
+ }
+}
+
+static void vce_v2_0_init_cg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp &= ~0xfff;
+ tmp |= ((0 << 0) | (4 << 4));
+ tmp |= 0x40000;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0xfff;
+ tmp |= ((0 << 0) | (4 << 4));
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0x10;
+ tmp &= ~0x100000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+}
+
+static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
+{
+ uint64_t addr = adev->vce.gpu_addr;
+ uint32_t size;
+
+ WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+
+ WREG32(mmVCE_LMI_CTRL, 0x00398000);
+ WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+ WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+ WREG32(mmVCE_LMI_VM_CTRL, 0);
+
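+	/* firmware image, stack and heap are laid out back to back in VRAM */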
+ addr += AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vce.fw->size);
+ WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = AMDGPU_VCE_STACK_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = AMDGPU_VCE_HEAP_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+
+ WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+
+ vce_v2_0_init_cg(adev);
+}
+
+static bool vce_v2_0_is_idle(struct amdgpu_device *adev)
+{
+ return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+}
+
+static int vce_v2_0_wait_for_idle(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
+static int vce_v2_0_soft_reset(struct amdgpu_device *adev)
+{
+ WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
+ ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+ mdelay(5);
+
+ return vce_v2_0_start(adev);
+}
+
+static void vce_v2_0_print_status(struct amdgpu_device *adev)
+{
+ dev_info(adev->dev, "VCE 2.0 registers\n");
+ dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
+ RREG32(mmVCE_STATUS));
+ dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
+ RREG32(mmVCE_VCPU_CNTL));
+ dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
+ RREG32(mmVCE_VCPU_CACHE_OFFSET0));
+ dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
+ RREG32(mmVCE_VCPU_CACHE_SIZE0));
+ dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
+ RREG32(mmVCE_VCPU_CACHE_OFFSET1));
+ dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
+ RREG32(mmVCE_VCPU_CACHE_SIZE1));
+ dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
+ RREG32(mmVCE_VCPU_CACHE_OFFSET2));
+ dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
+ RREG32(mmVCE_VCPU_CACHE_SIZE2));
+ dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
+ RREG32(mmVCE_SOFT_RESET));
+ dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
+ RREG32(mmVCE_RB_BASE_LO2));
+ dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
+ RREG32(mmVCE_RB_BASE_HI2));
+ dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
+ RREG32(mmVCE_RB_SIZE2));
+ dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
+ RREG32(mmVCE_RB_RPTR2));
+ dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
+ RREG32(mmVCE_RB_WPTR2));
+ dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
+ RREG32(mmVCE_RB_BASE_LO));
+ dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
+ RREG32(mmVCE_RB_BASE_HI));
+ dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
+ RREG32(mmVCE_RB_SIZE));
+ dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
+ RREG32(mmVCE_RB_RPTR));
+ dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
+ RREG32(mmVCE_RB_WPTR));
+ dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
+ RREG32(mmVCE_CLOCK_GATING_A));
+ dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
+ RREG32(mmVCE_CLOCK_GATING_B));
+ dev_info(adev->dev, " VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
+ RREG32(mmVCE_CGTT_CLK_OVERRIDE));
+ dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
+ RREG32(mmVCE_UENC_CLOCK_GATING));
+ dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
+ RREG32(mmVCE_UENC_REG_CLOCK_GATING));
+ dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
+ RREG32(mmVCE_SYS_INT_EN));
+ dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
+ RREG32(mmVCE_LMI_CTRL2));
+ dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
+ RREG32(mmVCE_LMI_CTRL));
+ dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
+ RREG32(mmVCE_LMI_VM_CTRL));
+ dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
+ RREG32(mmVCE_LMI_SWAP_CNTL));
+ dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
+ RREG32(mmVCE_LMI_SWAP_CNTL1));
+ dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
+ RREG32(mmVCE_LMI_CACHE_CTRL));
+}
+
+static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+ WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ return 0;
+}
+
+static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: VCE\n");
+ switch (entry->src_data) {
+ case 0:
+ amdgpu_fence_process(&adev->vce.ring[0]);
+ break;
+ case 1:
+ amdgpu_fence_process(&adev->vce.ring[1]);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data);
+ break;
+ }
+
+ return 0;
+}
+
+static int vce_v2_0_set_clockgating_state(struct amdgpu_device *adev,
+ enum amdgpu_clockgating_state state)
+{
+ bool gate = false;
+
+ if (state == AMDGPU_CG_STATE_GATE)
+ gate = true;
+
+ vce_v2_0_enable_mgcg(adev, gate);
+
+ return 0;
+}
+
+static int vce_v2_0_set_powergating_state(struct amdgpu_device *adev,
+ enum amdgpu_powergating_state state)
+{
+ /* This doesn't actually powergate the VCE block.
+ * That's done in the dpm code via the SMC. This
+ * just re-inits the block as necessary. The actual
+ * gating still happens in the dpm code. We should
+ * revisit this when there is a cleaner line between
+ * the smc and the hw blocks
+ */
+ if (state == AMDGPU_PG_STATE_GATE)
+ /* XXX do we need a vce_v2_0_stop()? */
+ return 0;
+ else
+ return vce_v2_0_start(adev);
+}
+
+const struct amdgpu_ip_funcs vce_v2_0_ip_funcs = {
+ .early_init = vce_v2_0_early_init,
+ .late_init = NULL,
+ .sw_init = vce_v2_0_sw_init,
+ .sw_fini = vce_v2_0_sw_fini,
+ .hw_init = vce_v2_0_hw_init,
+ .hw_fini = vce_v2_0_hw_fini,
+ .suspend = vce_v2_0_suspend,
+ .resume = vce_v2_0_resume,
+ .is_idle = vce_v2_0_is_idle,
+ .wait_for_idle = vce_v2_0_wait_for_idle,
+ .soft_reset = vce_v2_0_soft_reset,
+ .print_status = vce_v2_0_print_status,
+ .set_clockgating_state = vce_v2_0_set_clockgating_state,
+ .set_powergating_state = vce_v2_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+ .get_rptr = vce_v2_0_ring_get_rptr,
+ .get_wptr = vce_v2_0_ring_get_wptr,
+ .set_wptr = vce_v2_0_ring_set_wptr,
+ .parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .is_lockup = amdgpu_ring_test_lockup,
+};
+
+static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
+ adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
+ .set = vce_v2_0_set_interrupt_state,
+ .process = vce_v2_0_process_interrupt,
+};
+
+static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->vce.irq.num_types = 1;
+ adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
new file mode 100644
index 000000000000..8eb1cf227ea6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCE_V2_0_H__
+#define __VCE_V2_0_H__
+
+extern const struct amdgpu_ip_funcs vce_v2_0_ip_funcs;
+
+#endif