author    Dave Airlie <airlied@redhat.com>  2024-04-22 14:28:10 +1000
committer Dave Airlie <airlied@redhat.com>  2024-04-22 14:28:10 +1000
commit    f1c7bbe33c7aa31fea3ff30136642dc1599ff886 (patch)
tree      821f6aebea8c14d2c32842fe96ad1fae4d47a1a3
parent    ae924acffb1beacaff8519bfa48b030abc59aebf (diff)
2024y-04m-22d-04h-27m-19s UTC: drm-tip rerere cache update
git version 2.44.0
-rw-r--r-- rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/postimage   2781
-rw-r--r-- rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage    2784
-rw-r--r-- rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage.1  2784
-rw-r--r-- rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/postimage   1844
-rw-r--r-- rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage    1847
-rw-r--r-- rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage.1  1847
-rw-r--r-- rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/postimage    232
-rw-r--r-- rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage     236
-rw-r--r-- rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage.1   236
9 files changed, 14591 insertions, 0 deletions
diff --git a/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/postimage b/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/postimage
new file mode 100644
index 000000000000..0e31bdb4b7cb
--- /dev/null
+++ b/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/postimage
@@ -0,0 +1,2781 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_discovery.h"
+#include "soc15_hw_ip.h"
+#include "discovery.h"
+#include "amdgpu_ras.h"
+
+#include "soc15.h"
+#include "gfx_v9_0.h"
+#include "gfx_v9_4_3.h"
+#include "gmc_v9_0.h"
+#include "df_v1_7.h"
+#include "df_v3_6.h"
+#include "df_v4_3.h"
+#include "df_v4_6_2.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
+#include "nbio_v7_9.h"
+#include "nbio_v7_11.h"
+#include "hdp_v4_0.h"
+#include "vega10_ih.h"
+#include "vega20_ih.h"
+#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
+#include "uvd_v7_0.h"
+#include "vce_v4_0.h"
+#include "vcn_v1_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
+#include "smuio_v9_0.h"
+#include "gmc_v10_0.h"
+#include "gmc_v11_0.h"
+#include "gfxhub_v2_0.h"
+#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
+#include "nbio_v4_3.h"
+#include "nbio_v7_2.h"
+#include "nbio_v7_7.h"
+#include "nbif_v6_3_1.h"
+#include "hdp_v5_0.h"
+#include "hdp_v5_2.h"
+#include "hdp_v6_0.h"
+#include "hdp_v7_0.h"
+#include "nv.h"
+#include "soc21.h"
+#include "navi10_ih.h"
+#include "ih_v6_0.h"
+#include "ih_v6_1.h"
+#include "ih_v7_0.h"
+#include "gfx_v10_0.h"
+#include "gfx_v11_0.h"
+#include "sdma_v5_0.h"
+#include "sdma_v5_2.h"
+#include "sdma_v6_0.h"
+#include "lsdma_v6_0.h"
+#include "lsdma_v7_0.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v3_0.h"
+#include "jpeg_v3_0.h"
+#include "vcn_v4_0.h"
+#include "jpeg_v4_0.h"
+#include "vcn_v4_0_3.h"
+#include "jpeg_v4_0_3.h"
+#include "vcn_v4_0_5.h"
+#include "jpeg_v4_0_5.h"
+#include "amdgpu_vkms.h"
+#include "mes_v10_1.h"
+#include "mes_v11_0.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
+#include "smuio_v13_0.h"
+#include "smuio_v13_0_3.h"
+#include "smuio_v13_0_6.h"
+#include "smuio_v14_0_2.h"
+#include "vcn_v5_0_0.h"
+#include "jpeg_v5_0_0.h"
+
+#include "amdgpu_vpe.h"
+
+#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
+MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+
+#define mmIP_DISCOVERY_VERSION 0x16A00
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+#define mmMP0_SMN_C2PMSG_33 0x16061
+#define mmMM_INDEX 0x0
+#define mmMM_INDEX_HI 0x6
+#define mmMM_DATA 0x1
+
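+/* Human-readable names for the hardware IDs found in the IP discovery table. */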
+static const char *hw_id_names[HW_ID_MAX] = {
+ [MP1_HWID] = "MP1",
+ [MP2_HWID] = "MP2",
+ [THM_HWID] = "THM",
+ [SMUIO_HWID] = "SMUIO",
+ [FUSE_HWID] = "FUSE",
+ [CLKA_HWID] = "CLKA",
+ [PWR_HWID] = "PWR",
+ [GC_HWID] = "GC",
+ [UVD_HWID] = "UVD",
+ [AUDIO_AZ_HWID] = "AUDIO_AZ",
+ [ACP_HWID] = "ACP",
+ [DCI_HWID] = "DCI",
+ [DMU_HWID] = "DMU",
+ [DCO_HWID] = "DCO",
+ [DIO_HWID] = "DIO",
+ [XDMA_HWID] = "XDMA",
+ [DCEAZ_HWID] = "DCEAZ",
+ [DAZ_HWID] = "DAZ",
+ [SDPMUX_HWID] = "SDPMUX",
+ [NTB_HWID] = "NTB",
+ [IOHC_HWID] = "IOHC",
+ [L2IMU_HWID] = "L2IMU",
+ [VCE_HWID] = "VCE",
+ [MMHUB_HWID] = "MMHUB",
+ [ATHUB_HWID] = "ATHUB",
+ [DBGU_NBIO_HWID] = "DBGU_NBIO",
+ [DFX_HWID] = "DFX",
+ [DBGU0_HWID] = "DBGU0",
+ [DBGU1_HWID] = "DBGU1",
+ [OSSSYS_HWID] = "OSSSYS",
+ [HDP_HWID] = "HDP",
+ [SDMA0_HWID] = "SDMA0",
+ [SDMA1_HWID] = "SDMA1",
+ [SDMA2_HWID] = "SDMA2",
+ [SDMA3_HWID] = "SDMA3",
+ [LSDMA_HWID] = "LSDMA",
+ [ISP_HWID] = "ISP",
+ [DBGU_IO_HWID] = "DBGU_IO",
+ [DF_HWID] = "DF",
+ [CLKB_HWID] = "CLKB",
+ [FCH_HWID] = "FCH",
+ [DFX_DAP_HWID] = "DFX_DAP",
+ [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
+ [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
+ [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
+ [L1IMU3_HWID] = "L1IMU3",
+ [L1IMU4_HWID] = "L1IMU4",
+ [L1IMU5_HWID] = "L1IMU5",
+ [L1IMU6_HWID] = "L1IMU6",
+ [L1IMU7_HWID] = "L1IMU7",
+ [L1IMU8_HWID] = "L1IMU8",
+ [L1IMU9_HWID] = "L1IMU9",
+ [L1IMU10_HWID] = "L1IMU10",
+ [L1IMU11_HWID] = "L1IMU11",
+ [L1IMU12_HWID] = "L1IMU12",
+ [L1IMU13_HWID] = "L1IMU13",
+ [L1IMU14_HWID] = "L1IMU14",
+ [L1IMU15_HWID] = "L1IMU15",
+ [WAFLC_HWID] = "WAFLC",
+ [FCH_USB_PD_HWID] = "FCH_USB_PD",
+ [PCIE_HWID] = "PCIE",
+ [PCS_HWID] = "PCS",
+ [DDCL_HWID] = "DDCL",
+ [SST_HWID] = "SST",
+ [IOAGR_HWID] = "IOAGR",
+ [NBIF_HWID] = "NBIF",
+ [IOAPIC_HWID] = "IOAPIC",
+ [SYSTEMHUB_HWID] = "SYSTEMHUB",
+ [NTBCCP_HWID] = "NTBCCP",
+ [UMC_HWID] = "UMC",
+ [SATA_HWID] = "SATA",
+ [USB_HWID] = "USB",
+ [CCXSEC_HWID] = "CCXSEC",
+ [XGMI_HWID] = "XGMI",
+ [XGBE_HWID] = "XGBE",
+ [MP0_HWID] = "MP0",
+ [VPE_HWID] = "VPE",
+};
+
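+/* Map the driver's HWIP enumeration onto the HW IDs used by the discovery table. */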
+static int hw_id_map[MAX_HWIP] = {
+ [GC_HWIP] = GC_HWID,
+ [HDP_HWIP] = HDP_HWID,
+ [SDMA0_HWIP] = SDMA0_HWID,
+ [SDMA1_HWIP] = SDMA1_HWID,
+ [SDMA2_HWIP] = SDMA2_HWID,
+ [SDMA3_HWIP] = SDMA3_HWID,
+ [LSDMA_HWIP] = LSDMA_HWID,
+ [MMHUB_HWIP] = MMHUB_HWID,
+ [ATHUB_HWIP] = ATHUB_HWID,
+ [NBIO_HWIP] = NBIF_HWID,
+ [MP0_HWIP] = MP0_HWID,
+ [MP1_HWIP] = MP1_HWID,
+ [UVD_HWIP] = UVD_HWID,
+ [VCE_HWIP] = VCE_HWID,
+ [DF_HWIP] = DF_HWID,
+ [DCE_HWIP] = DMU_HWID,
+ [OSSSYS_HWIP] = OSSSYS_HWID,
+ [SMUIO_HWIP] = SMUIO_HWID,
+ [PWR_HWIP] = PWR_HWID,
+ [NBIF_HWIP] = NBIF_HWID,
+ [THM_HWIP] = THM_HWID,
+ [CLK_HWIP] = CLKA_HWID,
+ [UMC_HWIP] = UMC_HWID,
+ [XGMI_HWIP] = XGMI_HWID,
+ [DCI_HWIP] = DCI_HWID,
+ [PCIE_HWIP] = PCIE_HWID,
+ [VPE_HWIP] = VPE_HWID,
+};
+
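+/* Fallback path: locate the discovery TMR via ACPI and memremap it from
+ * system memory; used when the device reports no dedicated VRAM.
+ */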
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ u64 tmr_offset, tmr_size, pos;
+ void *discv_regn;
+ int ret;
+
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+
+ pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+{
+ uint64_t vram_size;
+ u32 msg;
+ int i, ret = 0;
+
+ /* It can take up to a second for IFWI init to complete on some dGPUs,
+ * but generally it should be in the 60-100ms range. Normally this starts
+ * as soon as the device gets power, so by the time the OS loads, it has
+ * long since completed. However, when a card is hotplugged via, e.g.,
+ * USB4, we need to wait for the init to complete. Once the C2PMSG is
+ * updated, we can continue.
+ */
+
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ usleep_range(1000, 1100);
+ }
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->mman.discovery_tmr_size, false);
+ } else {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ }
+
+ return ret;
+}
+
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+{
+ const struct firmware *fw;
+ const char *fw_name;
+ int r;
+
+ switch (amdgpu_discovery) {
+ case 2:
+ fw_name = FIRMWARE_IP_DISCOVERY;
+ break;
+ default:
+ dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
+ return -EINVAL;
+ }
+
+ r = request_firmware(&fw, fw_name, adev->dev);
+ if (r) {
+ dev_err(adev->dev, "can't load firmware \"%s\"\n",
+ fw_name);
+ return r;
+ }
+
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
+ release_firmware(fw);
+
+ return 0;
+}
+
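+/* The discovery binary is protected by a simple byte-wise additive checksum. */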
+static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
+{
+ uint16_t checksum = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ checksum += data[i];
+
+ return checksum;
+}
+
+static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
+ uint16_t expected)
+{
+ return amdgpu_discovery_calculate_checksum(data, size) == expected;
+}
+
+static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
+{
+ struct binary_header *bhdr;
+ bhdr = (struct binary_header *)binary;
+
+ return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
+}
+
+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+ /*
+ * So far, apply this quirk only to those Navy Flounder boards whose
+ * harvest table carries a bad VCN config.
+ */
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
+ switch (adev->pdev->revision) {
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC5:
+ case 0xC7:
+ case 0xCF:
+ case 0xDF:
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
+{
+ struct table_info *info;
+ struct binary_header *bhdr;
+ uint16_t offset;
+ uint16_t size;
+ uint16_t checksum;
+ int r;
+
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+ if (!adev->mman.discovery_bin)
+ return -ENOMEM;
+
+ /* Read from file if it is the preferred option */
+ if (amdgpu_discovery == 2) {
+ dev_info(adev->dev, "use ip discovery information from file\n");
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
+
+ if (r) {
+ dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ } else {
+ r = amdgpu_discovery_read_binary_from_mem(
+ adev, adev->mman.discovery_bin);
+ if (r)
+ goto out;
+ }
+
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_err(adev->dev,
+ "invalid ip discovery binary signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+
+ offset = offsetof(struct binary_header, binary_checksum) +
+ sizeof(bhdr->binary_checksum);
+ size = le16_to_cpu(bhdr->binary_size) - offset;
+ checksum = le16_to_cpu(bhdr->binary_checksum);
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ size, checksum)) {
+ dev_err(adev->dev, "invalid ip discovery binary checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ info = &bhdr->table_list[IP_DISCOVERY];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct ip_discovery_header *ihdr =
+ (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery data table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le16_to_cpu(ihdr->size), checksum)) {
+ dev_err(adev->dev, "invalid ip discovery data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[GC];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct gpu_info_header *ghdr =
+ (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery gc table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(ghdr->size), checksum)) {
+ dev_err(adev->dev, "invalid gc data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[HARVEST_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct harvest_info_header *hhdr =
+ (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
+ dev_err(adev->dev, "invalid harvest data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[VCN_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct vcn_info_header *vhdr =
+ (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery vcn table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid vcn data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[MALL_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ /* Checksum validation of the MALL info table is deliberately compiled
+ * out here (the "0 &&" guard), presumably because some firmware ships a
+ * table whose checksum does not validate.
+ */
+ if (0 && offset) {
+ struct mall_info_header *mhdr =
+ (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery mall table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid mall data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+ if ((amdgpu_discovery != 2) &&
+ (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+ amdgpu_ras_query_boot_status(adev, 4);
+ return r;
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
+
+void amdgpu_discovery_fini(struct amdgpu_device *adev)
+{
+ amdgpu_discovery_sysfs_fini(adev);
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+}
+
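+/* Sanity-check an IP entry before its instance_number and hw_id are used as
+ * array indices elsewhere.
+ */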
+static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
+{
+ if (ip->instance_number >= HWIP_MAX_INSTANCE) {
+ DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
+ ip->instance_number);
+ return -EINVAL;
+ }
+ if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
+ DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
+ le16_to_cpu(ip->hw_id));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
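+/* Legacy harvest detection: walk every IP entry and treat a variant value of
+ * 1 as the harvested marker, rather than consulting a dedicated harvest table.
+ */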
+static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset, ip_offset, num_dies, num_ips;
+ int i, j;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ /* scan harvest bit of all IP data structures */
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ if (le16_to_cpu(ip->variant) == 1) {
+ switch (le16_to_cpu(ip->hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ if (ip->instance_number == 0) {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ } else {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ }
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+}
+
+static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count,
+ uint32_t *umc_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ u16 offset;
+ int i;
+ uint32_t umc_harvest_config = 0;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
+
+ if (!offset) {
+ dev_err(adev->dev, "invalid harvest table offset\n");
+ return;
+ }
+
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+
+ for (i = 0; i < 32; i++) {
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ adev->vcn.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+ adev->jpeg.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+
+ adev->vcn.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ adev->jpeg.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
+ (*umc_harvest_count)++;
+ break;
+ case GC_HWID:
+ adev->gfx.xcc_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case SDMA0_HWID:
+ adev->sdma.sdma_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ default:
+ break;
+ }
+ }
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
+}
+
+/* ================================================== */
+
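+/* The structures below expose the parsed table under the device's sysfs
+ * directory, e.g. (path shown for illustration):
+ *   /sys/class/drm/card0/device/ip_discovery/die/0/GC/0/{major,minor,revision,...}
+ */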
+struct ip_hw_instance {
+ struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
+
+ int hw_id;
+ u8 num_instance;
+ u8 major, minor, revision;
+ u8 harvest;
+
+ int num_base_addresses;
+ u32 base_addr[] __counted_by(num_base_addresses);
+};
+
+struct ip_hw_id {
+ struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
+ int hw_id;
+};
+
+struct ip_die_entry {
+ struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
+ u16 num_ips;
+};
+
+/* -------------------------------------------------- */
+
+struct ip_hw_instance_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
+};
+
+static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
+}
+
+static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
+}
+
+static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
+}
+
+static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
+}
+
+static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
+}
+
+static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
+}
+
+static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
+}
+
+static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ ssize_t res, at;
+ int ii;
+
+ for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
+ /* Ensure that at + size <= PAGE_SIZE; each entry needs 12 bytes:
+ * "0x" + 8 hex digits + "\n" + NUL terminator.
+ */
+ if (at + 12 > PAGE_SIZE)
+ break;
+ res = sysfs_emit_at(buf, at, "0x%08X\n",
+ ip_hw_instance->base_addr[ii]);
+ if (res <= 0)
+ break;
+ at += res;
+ }
+
+ return res < 0 ? res : at;
+}
+
+static struct ip_hw_instance_attr ip_hw_attr[] = {
+ __ATTR_RO(hw_id),
+ __ATTR_RO(num_instance),
+ __ATTR_RO(major),
+ __ATTR_RO(minor),
+ __ATTR_RO(revision),
+ __ATTR_RO(harvest),
+ __ATTR_RO(num_base_addresses),
+ __ATTR_RO(base_addr),
+};
+
+static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
+ATTRIBUTE_GROUPS(ip_hw_instance);
+
+#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
+#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
+
+static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+ struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
+
+ if (!ip_hw_attr->show)
+ return -EIO;
+
+ return ip_hw_attr->show(ip_hw_instance, buf);
+}
+
+static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
+ .show = ip_hw_instance_attr_show,
+};
+
+static void ip_hw_instance_release(struct kobject *kobj)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+
+ kfree(ip_hw_instance);
+}
+
+static const struct kobj_type ip_hw_instance_ktype = {
+ .release = ip_hw_instance_release,
+ .sysfs_ops = &ip_hw_instance_sysfs_ops,
+ .default_groups = ip_hw_instance_groups,
+};
+
+/* -------------------------------------------------- */
+
+#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
+
+static void ip_hw_id_release(struct kobject *kobj)
+{
+ struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
+
+ if (!list_empty(&ip_hw_id->hw_id_kset.list))
+ DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
+ kfree(ip_hw_id);
+}
+
+static const struct kobj_type ip_hw_id_ktype = {
+ .release = ip_hw_id_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* -------------------------------------------------- */
+
+static void die_kobj_release(struct kobject *kobj);
+static void ip_disc_release(struct kobject *kobj);
+
+struct ip_die_entry_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
+};
+
+#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
+
+static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
+}
+
+/* If there are more ip_die_entry attrs, other than the number of IPs,
+ * we can make this into an array of attrs, and then initialize
+ * ip_die_entry_attrs in a loop.
+ */
+static struct ip_die_entry_attribute num_ips_attr =
+ __ATTR_RO(num_ips);
+
+static struct attribute *ip_die_entry_attrs[] = {
+ &num_ips_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
+
+#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
+
+static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!ip_die_entry_attr->show)
+ return -EIO;
+
+ return ip_die_entry_attr->show(ip_die_entry, buf);
+}
+
+static void ip_die_entry_release(struct kobject *kobj)
+{
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!list_empty(&ip_die_entry->ip_kset.list))
+ DRM_ERROR("ip_die_entry->ip_kset is not empty");
+ kfree(ip_die_entry);
+}
+
+static const struct sysfs_ops ip_die_entry_sysfs_ops = {
+ .show = ip_die_entry_attr_show,
+};
+
+static const struct kobj_type ip_die_entry_ktype = {
+ .release = ip_die_entry_release,
+ .sysfs_ops = &ip_die_entry_sysfs_ops,
+ .default_groups = ip_die_entry_groups,
+};
+
+static const struct kobj_type die_kobj_ktype = {
+ .release = die_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static const struct kobj_type ip_discovery_ktype = {
+ .release = ip_disc_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+struct ip_discovery_top {
+ struct kobject kobj; /* ip_discovery/ */
+ struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
+ struct amdgpu_device *adev;
+};
+
+static void die_kobj_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
+ struct ip_discovery_top,
+ die_kset);
+ if (!list_empty(&ip_top->die_kset.list))
+ DRM_ERROR("ip_top->die_kset is not empty");
+}
+
+static void ip_disc_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
+ kobj);
+ struct amdgpu_device *adev = ip_top->adev;
+
+ adev->ip_top = NULL;
+ kfree(ip_top);
+}
+
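+/* Report whether a given instance of a hw_id was harvested, based on the
+ * instance masks populated while parsing the harvest information.
+ */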
+static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+ uint16_t hw_id, uint8_t inst)
+{
+ uint8_t harvest = 0;
+
+ /* Until a uniform way is figured out, derive the mask from the hw_id */
+ switch (hw_id) {
+ case VCN_HWID:
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ break;
+ case DMU_HWID:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+ harvest = 0x1;
+ break;
+ case UMC_HWID:
+ /* TODO: UMC harvest info needs separate parsing; ignore for now. */
+ break;
+ case GC_HWID:
+ harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
+ break;
+ case SDMA0_HWID:
+ harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
+ break;
+ default:
+ break;
+ }
+
+ return harvest;
+}
+
+static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
+ struct ip_die_entry *ip_die_entry,
+ const size_t _ip_offset, const int num_ips,
+ bool reg_base_64)
+{
+ int ii, jj, kk, res;
+
+ DRM_DEBUG("num_ips:%d", num_ips);
+
+ /* Find all IPs of a given HW ID, and add their instance to
+ * #die/#hw_id/#instance/<attributes>
+ */
+ for (ii = 0; ii < HW_ID_MAX; ii++) {
+ struct ip_hw_id *ip_hw_id = NULL;
+ size_t ip_offset = _ip_offset;
+
+ for (jj = 0; jj < num_ips; jj++) {
+ struct ip_v4 *ip;
+ struct ip_hw_instance *ip_hw_instance;
+
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ if (amdgpu_discovery_validate_ip(ip) ||
+ le16_to_cpu(ip->hw_id) != ii)
+ goto next_ip;
+
+ DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
+
+ /* We have a hw_id match; register the hw
+ * block if not yet registered.
+ */
+ if (!ip_hw_id) {
+ ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
+ if (!ip_hw_id)
+ return -ENOMEM;
+ ip_hw_id->hw_id = ii;
+
+ kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
+ ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
+ ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
+ res = kset_register(&ip_hw_id->hw_id_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_hw_id kset");
+ kfree(ip_hw_id);
+ return res;
+ }
+ if (hw_id_names[ii]) {
+ res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
+ &ip_hw_id->hw_id_kset.kobj,
+ hw_id_names[ii]);
+ if (res) {
+ DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
+ hw_id_names[ii],
+ kobject_name(&ip_die_entry->ip_kset.kobj));
+ }
+ }
+ }
+
+ /* Now register its instance.
+ */
+ ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
+ base_addr,
+ ip->num_base_address),
+ GFP_KERNEL);
+ if (!ip_hw_instance) {
+ DRM_ERROR("no memory for ip_hw_instance");
+ return -ENOMEM;
+ }
+ ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
+ ip_hw_instance->num_instance = ip->instance_number;
+ ip_hw_instance->major = ip->major;
+ ip_hw_instance->minor = ip->minor;
+ ip_hw_instance->revision = ip->revision;
+ ip_hw_instance->harvest =
+ amdgpu_discovery_get_harvest_info(
+ adev, ip_hw_instance->hw_id,
+ ip_hw_instance->num_instance);
+ ip_hw_instance->num_base_addresses = ip->num_base_address;
+
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
+ if (reg_base_64)
+ ip_hw_instance->base_addr[kk] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
+ else
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+ }
+
+ kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
+ ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
+ res = kobject_add(&ip_hw_instance->kobj, NULL,
+ "%d", ip_hw_instance->num_instance);
+next_ip:
+ if (reg_base_64)
+ ip_offset += struct_size(ip, base_address_64,
+ ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct kset *die_kset = &adev->ip_top->die_kset;
+ u16 num_dies, die_offset, num_ips;
+ size_t ip_offset;
+ int ii, res;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (ii = 0; ii < num_dies; ii++) {
+ struct ip_die_entry *ip_die_entry;
+
+ die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ /* Add the die to the kset.
+ *
+ * dhdr->die_id == ii, which was checked in
+ * amdgpu_discovery_reg_base_init().
+ */
+
+ ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
+ if (!ip_die_entry)
+ return -ENOMEM;
+
+ ip_die_entry->num_ips = num_ips;
+
+ kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
+ ip_die_entry->ip_kset.kobj.kset = die_kset;
+ ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
+ res = kset_register(&ip_die_entry->ip_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_die_entry kset");
+ kfree(ip_die_entry);
+ return res;
+ }
+
+ amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
+{
+ struct kset *die_kset;
+ int res, ii;
+
+ if (!adev->mman.discovery_bin)
+ return -EINVAL;
+
+ adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
+ if (!adev->ip_top)
+ return -ENOMEM;
+
+ adev->ip_top->adev = adev;
+
+ res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ &adev->dev->kobj, "ip_discovery");
+ if (res) {
+ DRM_ERROR("Couldn't init and add ip_discovery/");
+ goto Err;
+ }
+
+ die_kset = &adev->ip_top->die_kset;
+ kobject_set_name(&die_kset->kobj, "%s", "die");
+ die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.ktype = &die_kobj_ktype;
+ res = kset_register(&adev->ip_top->die_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register die_kset");
+ goto Err;
+ }
+
+ for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
+ ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
+ ip_hw_instance_attrs[ii] = NULL;
+
+ res = amdgpu_discovery_sysfs_recurse(adev);
+
+ return res;
+Err:
+ kobject_put(&adev->ip_top->kobj);
+ return res;
+}
+
+/* -------------------------------------------------- */
+
+#define list_to_kobj(el) container_of(el, struct kobject, entry)
+
+static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
+{
+ struct list_head *el, *tmp;
+ struct kset *hw_id_kset;
+
+ hw_id_kset = &ip_hw_id->hw_id_kset;
+ spin_lock(&hw_id_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
+ list_del_init(el);
+ spin_unlock(&hw_id_kset->list_lock);
+ /* kobject is embedded in ip_hw_instance */
+ kobject_put(list_to_kobj(el));
+ spin_lock(&hw_id_kset->list_lock);
+ }
+ spin_unlock(&hw_id_kset->list_lock);
+ kobject_put(&ip_hw_id->hw_id_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
+{
+ struct list_head *el, *tmp;
+ struct kset *ip_kset;
+
+ ip_kset = &ip_die_entry->ip_kset;
+ spin_lock(&ip_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &ip_kset->list) {
+ list_del_init(el);
+ spin_unlock(&ip_kset->list_lock);
+ amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
+ spin_lock(&ip_kset->list_lock);
+ }
+ spin_unlock(&ip_kset->list_lock);
+ kobject_put(&ip_die_entry->ip_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct list_head *el, *tmp;
+ struct kset *die_kset;
+
+ die_kset = &adev->ip_top->die_kset;
+ spin_lock(&die_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &die_kset->list) {
+ list_del_init(el);
+ spin_unlock(&die_kset->list_lock);
+ amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
+ spin_lock(&die_kset->list_lock);
+ }
+ spin_unlock(&die_kset->list_lock);
+ kobject_put(&adev->ip_top->die_kset.kobj);
+ kobject_put(&adev->ip_top->kobj);
+}
+
+/* ================================================== */
+
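+/* Parse the discovery table: populate adev->reg_offset[][] and
+ * adev->ip_versions[][], and count VCN/SDMA/VPE/UMC instances along the way.
+ */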
+static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+{
+ uint8_t num_base_address, subrev, variant;
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset;
+ uint16_t ip_offset;
+ uint16_t num_dies;
+ uint16_t num_ips;
+ int hw_ip;
+ int i, j, k;
+ int r;
+
+ r = amdgpu_discovery_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_discovery_init failed\n");
+ return r;
+ }
+
+ adev->gfx.xcc_mask = 0;
+ adev->sdma.sdma_mask = 0;
+ adev->vcn.inst_mask = 0;
+ adev->jpeg.inst_mask = 0;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ if (le16_to_cpu(dhdr->die_id) != i) {
+ DRM_ERROR("invalid die id %d, expected %d\n",
+ le16_to_cpu(dhdr->die_id), i);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("number of hardware IPs on die%d: %d\n",
+ le16_to_cpu(dhdr->die_id), num_ips);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ num_base_address = ip->num_base_address;
+
+ DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)],
+ le16_to_cpu(ip->hw_id),
+ ip->instance_number,
+ ip->major, ip->minor,
+ ip->revision);
+
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
+ /* Bit [5:0]: original revision value
+ * Bit [7:6]: en/decode capability:
+ * 0b00 : VCN functions normally
+ * 0b10 : encode is disabled
+ * 0b01 : decode is disabled
+ */
+ if (adev->vcn.num_vcn_inst <
+ AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
+ adev->vcn.num_vcn_inst++;
+ adev->vcn.inst_mask |=
+ (1U << ip->instance_number);
+ adev->jpeg.inst_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
+ }
+ ip->revision &= ~0xc0;
+ }
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+ if (adev->sdma.num_instances <
+ AMDGPU_MAX_SDMA_INSTANCES) {
+ adev->sdma.num_instances++;
+ adev->sdma.sdma_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+ adev->sdma.num_instances + 1,
+ AMDGPU_MAX_SDMA_INSTANCES);
+ }
+ }
+
+ if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
+ if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
+ adev->vpe.num_instances++;
+ else
+ dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
+ adev->vpe.num_instances + 1,
+ AMDGPU_MAX_VPE_INSTANCES);
+ }
+
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
+ adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
+
+ if (le16_to_cpu(ip->hw_id) == GC_HWID)
+ adev->gfx.xcc_mask |=
+ (1U << ip->instance_number);
+
+ for (k = 0; k < num_base_address; k++) {
+ /*
+ * convert the endianness of base addresses in place,
+ * so that we don't need to convert them when accessing adev->reg_offset.
+ */
+ if (ihdr->base_addr_64_bit)
+ /* Truncate the 64bit base address from ip discovery
+ * and only store the lower 32bit ip base in reg_offset[].
+ * Bits beyond 31 follow an ASIC-specific format; discard
+ * them here and handle them within each specific ASIC.
+ * This way reg_offset[] and its related helpers can
+ * stay unchanged.
+ * The base address is in dwords, thus clear the
+ * highest 2 bits to store.
+ */
+ ip->base_address[k] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
+ else
+ ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
+ DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
+ }
+
+ for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
+ if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
+ hw_id_map[hw_ip] != 0) {
+ DRM_DEBUG("set register base offset for %s\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)]);
+ adev->reg_offset[hw_ip][ip->instance_number] =
+ ip->base_address;
+ /* Instance support is somewhat inconsistent.
+ * SDMA is a good example. Sienna cichlid has 4 total
+ * SDMA instances, each enumerated separately (HWIDs
+ * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
+ * but they are enumerated as multiple instances of the
+ * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
+ * example. On most chips there are multiple instances
+ * with the same HWID.
+ */
+
+ if (ihdr->version < 3) {
+ subrev = 0;
+ variant = 0;
+ } else {
+ subrev = ip->sub_revision;
+ variant = ip->variant;
+ }
+
+ adev->ip_versions[hw_ip]
+ [ip->instance_number] =
+ IP_VERSION_FULL(ip->major,
+ ip->minor,
+ ip->revision,
+ variant,
+ subrev);
+ }
+ }
+
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
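+/* Choose between per-IP harvest bits and the dedicated harvest table, then
+ * fold the result into the device-wide harvest masks.
+ */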
+static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+ int vcn_harvest_count = 0;
+ int umc_harvest_count = 0;
+
+ /*
+ * The harvest table does not apply to Navi1x and legacy GPUs,
+ * so read the harvest bit from each IP data structure to set
+ * the harvest configuration.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 ||
+ adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 &&
+ adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 &&
+ adev->pdev->revision == 0xC7))
+ amdgpu_discovery_read_harvest_bit_per_ip(adev,
+ &vcn_harvest_count);
+ } else {
+ amdgpu_discovery_read_from_harvest_table(adev,
+ &vcn_harvest_count,
+ &umc_harvest_count);
+ }
+
+ amdgpu_discovery_harvest_config_quirk(adev);
+
+ if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
+
+ if (umc_harvest_count < adev->gmc.num_umc)
+ adev->gmc.num_umc -= umc_harvest_count;
+}
+
+union gc_info {
+ struct gc_info_v1_0 v1;
+ struct gc_info_v1_1 v1_1;
+ struct gc_info_v1_2 v1_2;
+ struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
+};
+
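+/* All gc_info versions share a common header, so the version fields are read
+ * through v1 before the matching variant is parsed.
+ */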
+static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union gc_info *gc_info;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[GC].offset);
+
+ if (!offset)
+ return 0;
+
+ gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(gc_info->v1.header.version_major)) {
+ case 1:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
+ adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
+ }
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
+ adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
+ adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
+ adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
+ adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
+ adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
+ adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
+ }
+ break;
+ case 2:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled GC info table %d.%d\n",
+ le16_to_cpu(gc_info->v1.header.version_major),
+ le16_to_cpu(gc_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union mall_info {
+ struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
+};
+
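+/* MALL (last level cache) sizing: v1 sums per-UMC contributions, doubled or
+ * halved according to the present/half-use masks; v2 reports a flat
+ * per-UMC size.
+ */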
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union mall_info *mall_info;
+ u32 u, mall_size_per_umc, m_s_present, half_use;
+ u64 mall_size;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(mall_info->v1.header.version_major)) {
+ case 1:
+ mall_size = 0;
+ mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
+ m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
+ half_use = le32_to_cpu(mall_info->v1.m_half_use);
+ for (u = 0; u < adev->gmc.num_umc; u++) {
+ if (m_s_present & (1 << u))
+ mall_size += mall_size_per_umc * 2;
+ else if (half_use & (1 << u))
+ mall_size += mall_size_per_umc / 2;
+ else
+ mall_size += mall_size_per_umc;
+ }
+ adev->gmc.mall_size = mall_size;
+ adev->gmc.m_half_use = half_use;
+ break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled MALL info table %d.%d\n",
+ le16_to_cpu(mall_info->v1.header.version_major),
+ le16_to_cpu(mall_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union vcn_info {
+ struct vcn_info_v1_0 v1;
+};
+
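+/* Per-instance VCN fuse data records which codecs are disabled on each
+ * instance.
+ */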
+static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union vcn_info *vcn_info;
+ u16 offset;
+ int v;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
+ * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but that
+ * may change in the future with new GPUs, so keep this check for
+ * defensive purposes.
+ */
+ if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
+ dev_err(adev->dev, "invalid vcn instances\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
+ case 1:
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
+ * so this won't overflow.
+ */
+ for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
+ adev->vcn.vcn_codec_disable_mask[v] =
+ le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled VCN info table %d.%d\n",
+ le16_to_cpu(vcn_info->v1.header.version_major),
+ le16_to_cpu(vcn_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
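+/* The helpers below translate discovered IP versions into the matching IP
+ * block implementations to register with the device.
+ */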
+static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+{
+ /* what IP to use for this? Key off the GC IP version for now. */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add common ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+{
+ /* use GC or MMHUB IP version */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 3, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ break;
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
+ break;
+ case IP_VERSION(6, 1, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
+ break;
+ case IP_VERSION(7, 0, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 4):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ break;
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC)
+static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
+{
+ amdgpu_device_set_sriov_virtual_display(adev);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+}
+#endif
+
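+/*
+ * Register the display IP block: VKMS for virtual displays, otherwise
+ * the DC-based DM block for any recognized DCE or DCI IP version,
+ * with SR-IOV configurations getting the virtual-display setup instead.
+ */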
+static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display) {
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+ return 0;
+ }
+
+ if (!amdgpu_device_has_dc_support(adev))
+ return 0;
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCI_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+#endif
+ return 0;
+}
+
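+/*
+ * Register the GFX IP block matching the GC IP version (gfx v9,
+ * v9.4.3, v10 or v11 families).
+ */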
+static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
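+/*
+ * Register the SDMA IP block matching the SDMA0 IP version (sdma v4,
+ * v4.4.2, v5.0, v5.2 or v6 families).
+ */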
+static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ break;
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
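+/*
+ * Register the multimedia IP blocks. ASICs that report a VCE IP use
+ * the legacy UVD/VCE pairing; everything else uses VCN plus, where
+ * applicable, the matching JPEG block.
+ */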
+static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 2, 0):
+ /* UVD is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ /* VCE is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, VCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 2, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 3):
+ break;
+ case IP_VERSION(2, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case IP_VERSION(2, 6, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 0, 2):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
+ break;
+ case IP_VERSION(3, 0, 33):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 2):
+ case IP_VERSION(4, 0, 4):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 3):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
+ break;
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
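+/*
+ * Register the MES (MicroEngine Scheduler) IP block: opt-in via the
+ * amdgpu_mes module parameter on gfx10 parts, always enabled on the
+ * listed gfx11 parts. Unknown GC versions are simply skipped.
+ */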
+static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ if (amdgpu_mes) {
+ amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
+ adev->enable_mes = true;
+ if (amdgpu_mes_kiq)
+ adev->enable_mes_kiq = true;
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
+ adev->enable_mes = true;
+ adev->enable_mes_kiq = true;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ aqua_vanjaram_init_soc_config(adev);
+ break;
+ default:
+ break;
+ }
+}
+
+static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ if (amdgpu_umsch_mm & 0x1) {
+ amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
+ adev->enable_umsch_mm = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
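+/*
+ * Top-level IP block selection. Pre-discovery ASICs (Vega10, Vega12,
+ * Raven, Vega20, Arcturus, Aldebaran) get their IP versions and
+ * instance counts hardcoded here; everything else is parsed from the
+ * discovery binary. The per-subsystem helpers above are then invoked
+ * in dependency order, with PSP moved ahead of IH under SR-IOV.
+ */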
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ break;
+ case CHIP_VEGA12:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ break;
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 1;
+ adev->vcn.num_vcn_inst = 1;
+ adev->gmc.num_umc = 2;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ } else {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ }
+ break;
+ case CHIP_VEGA20:
+ vega20_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ adev->sdma.num_instances = 8;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ adev->sdma.num_instances = 5;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ break;
+ }
+
+ amdgpu_discovery_init_soc_config(adev);
+ amdgpu_discovery_sysfs_init(adev);
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ adev->family = AMDGPU_FAMILY_AI;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ adev->family = AMDGPU_FAMILY_RV;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->family = AMDGPU_FAMILY_NV;
+ break;
+ case IP_VERSION(10, 3, 1):
+ adev->family = AMDGPU_FAMILY_VGH;
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case IP_VERSION(10, 3, 3):
+ adev->family = AMDGPU_FAMILY_YC;
+ break;
+ case IP_VERSION(10, 3, 6):
+ adev->family = AMDGPU_FAMILY_GC_10_3_6;
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->family = AMDGPU_FAMILY_GC_10_3_7;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ adev->family = AMDGPU_FAMILY_GC_11_0_0;
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->family = AMDGPU_FAMILY_GC_11_0_1;
+ break;
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->family = AMDGPU_FAMILY_GC_11_5_0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->flags |= AMD_IS_APU;
+ break;
+ default:
+ break;
+ }
+
+ if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
+ adev->gmc.xgmi.supported = true;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
+
+ /* set NBIO version */
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 9, 0):
+ adev->nbio.funcs = &nbio_v7_9_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
+ adev->nbio.funcs = &nbio_v7_11_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 2, 0):
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
+ case IP_VERSION(7, 5, 1):
+ adev->nbio.funcs = &nbio_v7_2_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
+ break;
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 7, 0):
+ case IP_VERSION(7, 7, 1):
+ adev->nbio.funcs = &nbio_v7_7_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
+ break;
+ case IP_VERSION(6, 3, 1):
+ adev->nbio.funcs = &nbif_v6_3_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ adev->hdp.funcs = &hdp_v4_0_funcs;
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 0, 4):
+ case IP_VERSION(5, 2, 0):
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+ break;
+ case IP_VERSION(5, 2, 1):
+ adev->hdp.funcs = &hdp_v5_2_funcs;
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 1, 0):
+ adev->hdp.funcs = &hdp_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ adev->hdp.funcs = &hdp_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(3, 6, 1):
+ case IP_VERSION(3, 6, 2):
+ adev->df.funcs = &df_v3_6_funcs;
+ break;
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 5, 2):
+ adev->df.funcs = &df_v1_7_funcs;
+ break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
+ case IP_VERSION(4, 6, 2):
+ adev->df.funcs = &df_v4_6_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(10, 0, 2):
+ adev->smuio.funcs = &smuio_v9_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ adev->smuio.funcs = &smuio_v11_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 6):
+ case IP_VERSION(11, 0, 10):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 9):
+ case IP_VERSION(13, 0, 10):
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->smuio.funcs = &smuio_v13_0_funcs;
+ break;
+ case IP_VERSION(13, 0, 3):
+ adev->smuio.funcs = &smuio_v13_0_3_funcs;
+ if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
+ adev->flags |= AMD_IS_APU;
+ break;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ adev->smuio.funcs = &smuio_v13_0_6_funcs;
+ break;
+ case IP_VERSION(14, 0, 2):
+ adev->smuio.funcs = &smuio_v14_0_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ adev->lsdma.funcs = &lsdma_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ adev->lsdma.funcs = &lsdma_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ r = amdgpu_discovery_set_common_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gmc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ /* For SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+ } else {
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_display_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_sdma_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev)) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_mes_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_vpe_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
diff --git a/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage b/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage
new file mode 100644
index 000000000000..66f0a23afec8
--- /dev/null
+++ b/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage
@@ -0,0 +1,2784 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_discovery.h"
+#include "soc15_hw_ip.h"
+#include "discovery.h"
+#include "amdgpu_ras.h"
+
+#include "soc15.h"
+#include "gfx_v9_0.h"
+#include "gfx_v9_4_3.h"
+#include "gmc_v9_0.h"
+#include "df_v1_7.h"
+#include "df_v3_6.h"
+#include "df_v4_3.h"
+#include "df_v4_6_2.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
+#include "nbio_v7_9.h"
+#include "nbio_v7_11.h"
+#include "hdp_v4_0.h"
+#include "vega10_ih.h"
+#include "vega20_ih.h"
+#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
+#include "uvd_v7_0.h"
+#include "vce_v4_0.h"
+#include "vcn_v1_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
+#include "smuio_v9_0.h"
+#include "gmc_v10_0.h"
+#include "gmc_v11_0.h"
+#include "gfxhub_v2_0.h"
+#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
+#include "nbio_v4_3.h"
+#include "nbio_v7_2.h"
+#include "nbio_v7_7.h"
+#include "nbif_v6_3_1.h"
+#include "hdp_v5_0.h"
+#include "hdp_v5_2.h"
+#include "hdp_v6_0.h"
+#include "hdp_v7_0.h"
+#include "nv.h"
+#include "soc21.h"
+#include "navi10_ih.h"
+#include "ih_v6_0.h"
+#include "ih_v6_1.h"
+#include "ih_v7_0.h"
+#include "gfx_v10_0.h"
+#include "gfx_v11_0.h"
+#include "sdma_v5_0.h"
+#include "sdma_v5_2.h"
+#include "sdma_v6_0.h"
+#include "lsdma_v6_0.h"
+#include "lsdma_v7_0.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v3_0.h"
+#include "jpeg_v3_0.h"
+#include "vcn_v4_0.h"
+#include "jpeg_v4_0.h"
+#include "vcn_v4_0_3.h"
+#include "jpeg_v4_0_3.h"
+#include "vcn_v4_0_5.h"
+#include "jpeg_v4_0_5.h"
+#include "amdgpu_vkms.h"
+#include "mes_v10_1.h"
+#include "mes_v11_0.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
+#include "smuio_v13_0.h"
+#include "smuio_v13_0_3.h"
+#include "smuio_v13_0_6.h"
+#include "smuio_v14_0_2.h"
+#include "vcn_v5_0_0.h"
+#include "jpeg_v5_0_0.h"
+
+#include "amdgpu_vpe.h"
+
+#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
+MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+
+#define mmIP_DISCOVERY_VERSION 0x16A00
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+#define mmMP0_SMN_C2PMSG_33 0x16061
+#define mmMM_INDEX 0x0
+#define mmMM_INDEX_HI 0x6
+#define mmMM_DATA 0x1
+
+static const char *hw_id_names[HW_ID_MAX] = {
+ [MP1_HWID] = "MP1",
+ [MP2_HWID] = "MP2",
+ [THM_HWID] = "THM",
+ [SMUIO_HWID] = "SMUIO",
+ [FUSE_HWID] = "FUSE",
+ [CLKA_HWID] = "CLKA",
+ [PWR_HWID] = "PWR",
+ [GC_HWID] = "GC",
+ [UVD_HWID] = "UVD",
+ [AUDIO_AZ_HWID] = "AUDIO_AZ",
+ [ACP_HWID] = "ACP",
+ [DCI_HWID] = "DCI",
+ [DMU_HWID] = "DMU",
+ [DCO_HWID] = "DCO",
+ [DIO_HWID] = "DIO",
+ [XDMA_HWID] = "XDMA",
+ [DCEAZ_HWID] = "DCEAZ",
+ [DAZ_HWID] = "DAZ",
+ [SDPMUX_HWID] = "SDPMUX",
+ [NTB_HWID] = "NTB",
+ [IOHC_HWID] = "IOHC",
+ [L2IMU_HWID] = "L2IMU",
+ [VCE_HWID] = "VCE",
+ [MMHUB_HWID] = "MMHUB",
+ [ATHUB_HWID] = "ATHUB",
+ [DBGU_NBIO_HWID] = "DBGU_NBIO",
+ [DFX_HWID] = "DFX",
+ [DBGU0_HWID] = "DBGU0",
+ [DBGU1_HWID] = "DBGU1",
+ [OSSSYS_HWID] = "OSSSYS",
+ [HDP_HWID] = "HDP",
+ [SDMA0_HWID] = "SDMA0",
+ [SDMA1_HWID] = "SDMA1",
+ [SDMA2_HWID] = "SDMA2",
+ [SDMA3_HWID] = "SDMA3",
+ [LSDMA_HWID] = "LSDMA",
+ [ISP_HWID] = "ISP",
+ [DBGU_IO_HWID] = "DBGU_IO",
+ [DF_HWID] = "DF",
+ [CLKB_HWID] = "CLKB",
+ [FCH_HWID] = "FCH",
+ [DFX_DAP_HWID] = "DFX_DAP",
+ [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
+ [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
+ [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
+ [L1IMU3_HWID] = "L1IMU3",
+ [L1IMU4_HWID] = "L1IMU4",
+ [L1IMU5_HWID] = "L1IMU5",
+ [L1IMU6_HWID] = "L1IMU6",
+ [L1IMU7_HWID] = "L1IMU7",
+ [L1IMU8_HWID] = "L1IMU8",
+ [L1IMU9_HWID] = "L1IMU9",
+ [L1IMU10_HWID] = "L1IMU10",
+ [L1IMU11_HWID] = "L1IMU11",
+ [L1IMU12_HWID] = "L1IMU12",
+ [L1IMU13_HWID] = "L1IMU13",
+ [L1IMU14_HWID] = "L1IMU14",
+ [L1IMU15_HWID] = "L1IMU15",
+ [WAFLC_HWID] = "WAFLC",
+ [FCH_USB_PD_HWID] = "FCH_USB_PD",
+ [PCIE_HWID] = "PCIE",
+ [PCS_HWID] = "PCS",
+ [DDCL_HWID] = "DDCL",
+ [SST_HWID] = "SST",
+ [IOAGR_HWID] = "IOAGR",
+ [NBIF_HWID] = "NBIF",
+ [IOAPIC_HWID] = "IOAPIC",
+ [SYSTEMHUB_HWID] = "SYSTEMHUB",
+ [NTBCCP_HWID] = "NTBCCP",
+ [UMC_HWID] = "UMC",
+ [SATA_HWID] = "SATA",
+ [USB_HWID] = "USB",
+ [CCXSEC_HWID] = "CCXSEC",
+ [XGMI_HWID] = "XGMI",
+ [XGBE_HWID] = "XGBE",
+ [MP0_HWID] = "MP0",
+ [VPE_HWID] = "VPE",
+};
+
+static int hw_id_map[MAX_HWIP] = {
+ [GC_HWIP] = GC_HWID,
+ [HDP_HWIP] = HDP_HWID,
+ [SDMA0_HWIP] = SDMA0_HWID,
+ [SDMA1_HWIP] = SDMA1_HWID,
+ [SDMA2_HWIP] = SDMA2_HWID,
+ [SDMA3_HWIP] = SDMA3_HWID,
+ [LSDMA_HWIP] = LSDMA_HWID,
+ [MMHUB_HWIP] = MMHUB_HWID,
+ [ATHUB_HWIP] = ATHUB_HWID,
+ [NBIO_HWIP] = NBIF_HWID,
+ [MP0_HWIP] = MP0_HWID,
+ [MP1_HWIP] = MP1_HWID,
+ [UVD_HWIP] = UVD_HWID,
+ [VCE_HWIP] = VCE_HWID,
+ [DF_HWIP] = DF_HWID,
+ [DCE_HWIP] = DMU_HWID,
+ [OSSSYS_HWIP] = OSSSYS_HWID,
+ [SMUIO_HWIP] = SMUIO_HWID,
+ [PWR_HWIP] = PWR_HWID,
+ [NBIF_HWIP] = NBIF_HWID,
+ [THM_HWIP] = THM_HWID,
+ [CLK_HWIP] = CLKA_HWID,
+ [UMC_HWIP] = UMC_HWID,
+ [XGMI_HWIP] = XGMI_HWID,
+ [DCI_HWIP] = DCI_HWID,
+ [PCIE_HWIP] = PCIE_HWID,
+ [VPE_HWIP] = VPE_HWID,
+};
+
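+/*
+ * Read the discovery binary out of system memory: look up the PSP TMR
+ * region via ACPI and copy the table from its tail, DISCOVERY_TMR_OFFSET
+ * bytes before the end of the TMR.
+ */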
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ u64 tmr_offset, tmr_size, pos;
+ void *discv_regn;
+ int ret;
+
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+
+ pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
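+/*
+ * Read the discovery binary from the end of VRAM. If RCC_CONFIG_MEMSIZE
+ * reports no VRAM, fall back to the system-memory path above.
+ */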
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+{
+ uint64_t vram_size;
+ u32 msg;
+ int i, ret = 0;
+
+ /* It can take up to a second for IFWI init to complete on some dGPUs,
+ * but generally it should be in the 60-100ms range. Normally this starts
+ * as soon as the device gets power, so by the time the OS loads it has
+ * long since completed. However, when a card is hotplugged, e.g. via
+ * USB4, we need to wait for it to complete. Once the C2PMSG register is
+ * updated, we can continue.
+ */
+
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ usleep_range(1000, 1100);
+ }
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->mman.discovery_tmr_size, false);
+ } else {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ }
+
+ return ret;
+}
+
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+{
+ const struct firmware *fw;
+ const char *fw_name;
+ int r;
+
+ switch (amdgpu_discovery) {
+ case 2:
+ fw_name = FIRMWARE_IP_DISCOVERY;
+ break;
+ default:
+ dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
+ return -EINVAL;
+ }
+
+ r = request_firmware(&fw, fw_name, adev->dev);
+ if (r) {
+ dev_err(adev->dev, "can't load firmware \"%s\"\n",
+ fw_name);
+ return r;
+ }
+
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
+ release_firmware(fw);
+
+ return 0;
+}
+
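+/* Discovery tables use a simple additive byte checksum. */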
+static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
+{
+ uint16_t checksum = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ checksum += data[i];
+
+ return checksum;
+}
+
+static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
+ uint16_t expected)
+{
+ return amdgpu_discovery_calculate_checksum(data, size) == expected;
+}
+
+static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
+{
+ struct binary_header *bhdr;
+ bhdr = (struct binary_header *)binary;
+
+ return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
+}
+
+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+ /*
+ * So far, apply this quirk only to those Navy Flounder boards whose
+ * harvest table carries a bad VCN config.
+ */
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
+ switch (adev->pdev->revision) {
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC5:
+ case 0xC7:
+ case 0xCF:
+ case 0xDF:
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
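+/*
+ * Fetch the discovery binary (from a firmware file when
+ * amdgpu_discovery=2, otherwise from the device) and validate the
+ * binary signature, the global checksum, and the signature/checksum
+ * of each table (IP discovery, GC, harvest, VCN, MALL) before
+ * anything consumes it.
+ */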
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
+{
+ struct table_info *info;
+ struct binary_header *bhdr;
+ uint16_t offset;
+ uint16_t size;
+ uint16_t checksum;
+ int r;
+
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+ if (!adev->mman.discovery_bin)
+ return -ENOMEM;
+
+ /* Read from file if it is the preferred option */
+ if (amdgpu_discovery == 2) {
+ dev_info(adev->dev, "use ip discovery information from file");
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
+
+ if (r) {
+ dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ } else {
+ r = amdgpu_discovery_read_binary_from_mem(
+ adev, adev->mman.discovery_bin);
+ if (r)
+ goto out;
+ }
+
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_err(adev->dev,
+ "get invalid ip discovery binary signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+
+ offset = offsetof(struct binary_header, binary_checksum) +
+ sizeof(bhdr->binary_checksum);
+ size = le16_to_cpu(bhdr->binary_size) - offset;
+ checksum = le16_to_cpu(bhdr->binary_checksum);
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ size, checksum)) {
+ dev_err(adev->dev, "invalid ip discovery binary checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ info = &bhdr->table_list[IP_DISCOVERY];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct ip_discovery_header *ihdr =
+ (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery data table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le16_to_cpu(ihdr->size), checksum)) {
+ dev_err(adev->dev, "invalid ip discovery data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[GC];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct gpu_info_header *ghdr =
+ (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery gc table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(ghdr->size), checksum)) {
+ dev_err(adev->dev, "invalid gc data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[HARVEST_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct harvest_info_header *hhdr =
+ (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
+ dev_err(adev->dev, "invalid harvest data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[VCN_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct vcn_info_header *vhdr =
+ (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery vcn table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid vcn data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[MALL_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct mall_info_header *mhdr =
+ (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery mall table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid mall data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+ if ((amdgpu_discovery != 2) &&
+ (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+ amdgpu_ras_query_boot_status(adev, 4);
+ return r;
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
+
+void amdgpu_discovery_fini(struct amdgpu_device *adev)
+{
+ amdgpu_discovery_sysfs_fini(adev);
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+}
+
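+/* Sanity-check an IP entry from the discovery blob before trusting it. */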
+static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
+{
+ if (ip->instance_number >= HWIP_MAX_INSTANCE) {
+ DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
+ ip->instance_number);
+ return -EINVAL;
+ }
+ if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
+ DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
+ le16_to_cpu(ip->hw_id));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
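+/*
+ * Harvest parsing via the per-IP harvest bit: walk every IP instance
+ * in the discovery table and record VCN/DMU instances whose variant
+ * field flags them as harvested.
+ */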
+static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset, ip_offset, num_dies, num_ips;
+ int i, j;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ /* scan harvest bit of all IP data structures */
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ if (le16_to_cpu(ip->variant) == 1) {
+ switch (le16_to_cpu(ip->hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ if (ip->instance_number == 0) {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ } else {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ }
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+}
+
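+/*
+ * Harvest parsing via the dedicated harvest table: translate its
+ * entries into the driver's VCN/JPEG, UMC, GC and SDMA harvest and
+ * instance masks.
+ */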
+static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count,
+ uint32_t *umc_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ u16 offset;
+ int i;
+ uint32_t umc_harvest_config = 0;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
+
+ if (!offset) {
+ dev_err(adev->dev, "invalid harvest table offset\n");
+ return;
+ }
+
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+
+ for (i = 0; i < 32; i++) {
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ adev->vcn.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+ adev->jpeg.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+
+ adev->vcn.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ adev->jpeg.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
+ (*umc_harvest_count)++;
+ break;
+ case GC_HWID:
+ adev->gfx.xcc_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case SDMA0_HWID:
+ adev->sdma.sdma_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ default:
+ break;
+ }
+ }
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
+}
+
+/* ================================================== */
+
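+/*
+ * The parsed discovery table is exported to user space as a sysfs
+ * hierarchy rooted at the device's "ip_discovery" kobject:
+ *
+ *   ip_discovery/die/#die/#hw_id/#instance/<attrs...>
+ *
+ * with a human-readable symlink (e.g. "GC") placed next to each
+ * numeric #hw_id kset.
+ */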
+struct ip_hw_instance {
+ struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
+
+ int hw_id;
+ u8 num_instance;
+ u8 major, minor, revision;
+ u8 harvest;
+
+ int num_base_addresses;
+ u32 base_addr[] __counted_by(num_base_addresses);
+};
+
+struct ip_hw_id {
+ struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
+ int hw_id;
+};
+
+struct ip_die_entry {
+ struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
+ u16 num_ips;
+};
+
+/* -------------------------------------------------- */
+
+struct ip_hw_instance_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
+};
+
+static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
+}
+
+static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
+}
+
+static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
+}
+
+static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
+}
+
+static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
+}
+
+static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
+}
+
+static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
+}
+
+static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ ssize_t res, at;
+ int ii;
+
+ for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
+ /* Stop before at + size would exceed PAGE_SIZE. */
+ if (at + 12 > PAGE_SIZE)
+ break;
+ res = sysfs_emit_at(buf, at, "0x%08X\n",
+ ip_hw_instance->base_addr[ii]);
+ if (res <= 0)
+ break;
+ at += res;
+ }
+
+ return res < 0 ? res : at;
+}
+
+static struct ip_hw_instance_attr ip_hw_attr[] = {
+ __ATTR_RO(hw_id),
+ __ATTR_RO(num_instance),
+ __ATTR_RO(major),
+ __ATTR_RO(minor),
+ __ATTR_RO(revision),
+ __ATTR_RO(harvest),
+ __ATTR_RO(num_base_addresses),
+ __ATTR_RO(base_addr),
+};
+
+static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
+ATTRIBUTE_GROUPS(ip_hw_instance);
+
+#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
+#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
+
+static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+ struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
+
+ if (!ip_hw_attr->show)
+ return -EIO;
+
+ return ip_hw_attr->show(ip_hw_instance, buf);
+}
+
+static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
+ .show = ip_hw_instance_attr_show,
+};
+
+static void ip_hw_instance_release(struct kobject *kobj)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+
+ kfree(ip_hw_instance);
+}
+
+static const struct kobj_type ip_hw_instance_ktype = {
+ .release = ip_hw_instance_release,
+ .sysfs_ops = &ip_hw_instance_sysfs_ops,
+ .default_groups = ip_hw_instance_groups,
+};
+
+/* -------------------------------------------------- */
+
+#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
+
+static void ip_hw_id_release(struct kobject *kobj)
+{
+ struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
+
+ if (!list_empty(&ip_hw_id->hw_id_kset.list))
+ DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
+ kfree(ip_hw_id);
+}
+
+static const struct kobj_type ip_hw_id_ktype = {
+ .release = ip_hw_id_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* -------------------------------------------------- */
+
+static void die_kobj_release(struct kobject *kobj);
+static void ip_disc_release(struct kobject *kobj);
+
+struct ip_die_entry_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
+};
+
+#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
+
+static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
+}
+
+/* If there are more ip_die_entry attrs, other than the number of IPs,
+ * we can make this into an array of attrs, and then initialize
+ * ip_die_entry_attrs in a loop.
+ */
+static struct ip_die_entry_attribute num_ips_attr =
+ __ATTR_RO(num_ips);
+
+static struct attribute *ip_die_entry_attrs[] = {
+ &num_ips_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
+
+#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
+
+static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!ip_die_entry_attr->show)
+ return -EIO;
+
+ return ip_die_entry_attr->show(ip_die_entry, buf);
+}
+
+static void ip_die_entry_release(struct kobject *kobj)
+{
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!list_empty(&ip_die_entry->ip_kset.list))
+ DRM_ERROR("ip_die_entry->ip_kset is not empty");
+ kfree(ip_die_entry);
+}
+
+static const struct sysfs_ops ip_die_entry_sysfs_ops = {
+ .show = ip_die_entry_attr_show,
+};
+
+static const struct kobj_type ip_die_entry_ktype = {
+ .release = ip_die_entry_release,
+ .sysfs_ops = &ip_die_entry_sysfs_ops,
+ .default_groups = ip_die_entry_groups,
+};
+
+static const struct kobj_type die_kobj_ktype = {
+ .release = die_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static const struct kobj_type ip_discovery_ktype = {
+ .release = ip_disc_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+struct ip_discovery_top {
+ struct kobject kobj; /* ip_discovery/ */
+ struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
+ struct amdgpu_device *adev;
+};
+
+static void die_kobj_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
+ struct ip_discovery_top,
+ die_kset);
+ if (!list_empty(&ip_top->die_kset.list))
+ DRM_ERROR("ip_top->die_kset is not empty");
+}
+
+static void ip_disc_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
+ kobj);
+ struct amdgpu_device *adev = ip_top->adev;
+
+ adev->ip_top = NULL;
+ kfree(ip_top);
+}
+
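+/*
+ * Derive the harvest status exported via sysfs for one IP instance
+ * from the instance masks computed during harvest parsing.
+ */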
+static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+ uint16_t hw_id, uint8_t inst)
+{
+ uint8_t harvest = 0;
+
+ /* Until a uniform way is figured out, get the mask based on hw_id */
+ switch (hw_id) {
+ case VCN_HWID:
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ break;
+ case DMU_HWID:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+ harvest = 0x1;
+ break;
+ case UMC_HWID:
+ /* TODO: UMC harvest info needs separate parsing; ignore for now. */
+ break;
+ case GC_HWID:
+ harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
+ break;
+ case SDMA0_HWID:
+ harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
+ break;
+ default:
+ break;
+ }
+
+ return harvest;
+}
+
+static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
+ struct ip_die_entry *ip_die_entry,
+ const size_t _ip_offset, const int num_ips,
+ bool reg_base_64)
+{
+ int ii, jj, kk, res;
+
+ DRM_DEBUG("num_ips:%d", num_ips);
+
+ /* Find all IPs of a given HW ID, and add their instance to
+ * #die/#hw_id/#instance/<attributes>
+ */
+ for (ii = 0; ii < HW_ID_MAX; ii++) {
+ struct ip_hw_id *ip_hw_id = NULL;
+ size_t ip_offset = _ip_offset;
+
+ for (jj = 0; jj < num_ips; jj++) {
+ struct ip_v4 *ip;
+ struct ip_hw_instance *ip_hw_instance;
+
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ if (amdgpu_discovery_validate_ip(ip) ||
+ le16_to_cpu(ip->hw_id) != ii)
+ goto next_ip;
+
+ DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
+
+ /* We have a hw_id match; register the hw
+ * block if not yet registered.
+ */
+ if (!ip_hw_id) {
+ ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
+ if (!ip_hw_id)
+ return -ENOMEM;
+ ip_hw_id->hw_id = ii;
+
+ kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
+ ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
+ ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
+ res = kset_register(&ip_hw_id->hw_id_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_hw_id kset");
+ kfree(ip_hw_id);
+ return res;
+ }
+ if (hw_id_names[ii]) {
+ res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
+ &ip_hw_id->hw_id_kset.kobj,
+ hw_id_names[ii]);
+ if (res) {
+ DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
+ hw_id_names[ii],
+ kobject_name(&ip_die_entry->ip_kset.kobj));
+ }
+ }
+ }
+
+ /* Now register its instance.
+ */
+ ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
+ base_addr,
+ ip->num_base_address),
+ GFP_KERNEL);
+ if (!ip_hw_instance) {
+ DRM_ERROR("no memory for ip_hw_instance");
+ return -ENOMEM;
+ }
+ ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
+ ip_hw_instance->num_instance = ip->instance_number;
+ ip_hw_instance->major = ip->major;
+ ip_hw_instance->minor = ip->minor;
+ ip_hw_instance->revision = ip->revision;
+ ip_hw_instance->harvest =
+ amdgpu_discovery_get_harvest_info(
+ adev, ip_hw_instance->hw_id,
+ ip_hw_instance->num_instance);
+ ip_hw_instance->num_base_addresses = ip->num_base_address;
+
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
+ if (reg_base_64)
+ ip_hw_instance->base_addr[kk] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
+ else
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+ }
+
+ kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
+ ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
+ res = kobject_add(&ip_hw_instance->kobj, NULL,
+ "%d", ip_hw_instance->num_instance);
+next_ip:
+ if (reg_base_64)
+ ip_offset += struct_size(ip, base_address_64,
+ ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct kset *die_kset = &adev->ip_top->die_kset;
+ u16 num_dies, die_offset, num_ips;
+ size_t ip_offset;
+ int ii, res;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (ii = 0; ii < num_dies; ii++) {
+ struct ip_die_entry *ip_die_entry;
+
+ die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ /* Add the die to the kset.
+ *
+ * dhdr->die_id == ii, which was checked in
+ * amdgpu_discovery_reg_base_init().
+ */
+
+ ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
+ if (!ip_die_entry)
+ return -ENOMEM;
+
+ ip_die_entry->num_ips = num_ips;
+
+ kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
+ ip_die_entry->ip_kset.kobj.kset = die_kset;
+ ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
+ res = kset_register(&ip_die_entry->ip_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_die_entry kset");
+ kfree(ip_die_entry);
+ return res;
+ }
+
+ amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
+{
+ struct kset *die_kset;
+ int res, ii;
+
+ if (!adev->mman.discovery_bin)
+ return -EINVAL;
+
+ adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
+ if (!adev->ip_top)
+ return -ENOMEM;
+
+ adev->ip_top->adev = adev;
+
+ res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ &adev->dev->kobj, "ip_discovery");
+ if (res) {
+ DRM_ERROR("Couldn't init and add ip_discovery/");
+ goto Err;
+ }
+
+ die_kset = &adev->ip_top->die_kset;
+ kobject_set_name(&die_kset->kobj, "%s", "die");
+ die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.ktype = &die_kobj_ktype;
+ res = kset_register(&adev->ip_top->die_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register die_kset");
+ goto Err;
+ }
+
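+	/* Populate the NULL-terminated attribute array consumed by the
+	 * ip_hw_instance ktype; the trailing NULL below is the sentinel
+	 * that ends sysfs attribute iteration.
+	 */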
+ for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
+ ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
+ ip_hw_instance_attrs[ii] = NULL;
+
+ res = amdgpu_discovery_sysfs_recurse(adev);
+
+ return res;
+Err:
+ kobject_put(&adev->ip_top->kobj);
+ return res;
+}
+
+/* -------------------------------------------------- */
+
+#define list_to_kobj(el) container_of(el, struct kobject, entry)
+
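+/* Every kobject on a kset's ->list is chained through its 'entry' member,
+ * so container_of() maps a list node back to its embedding kobject.
+ */
+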
+static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
+{
+ struct list_head *el, *tmp;
+ struct kset *hw_id_kset;
+
+ hw_id_kset = &ip_hw_id->hw_id_kset;
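+	/* Each entry is unlinked under the kset lock, then the lock is
+	 * dropped around kobject_put() (whose release path frees the
+	 * embedding ip_hw_instance) and retaken to fetch the next entry.
+	 */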
+ spin_lock(&hw_id_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
+ list_del_init(el);
+ spin_unlock(&hw_id_kset->list_lock);
+ /* kobject is embedded in ip_hw_instance */
+ kobject_put(list_to_kobj(el));
+ spin_lock(&hw_id_kset->list_lock);
+ }
+ spin_unlock(&hw_id_kset->list_lock);
+ kobject_put(&ip_hw_id->hw_id_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
+{
+ struct list_head *el, *tmp;
+ struct kset *ip_kset;
+
+ ip_kset = &ip_die_entry->ip_kset;
+ spin_lock(&ip_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &ip_kset->list) {
+ list_del_init(el);
+ spin_unlock(&ip_kset->list_lock);
+ amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
+ spin_lock(&ip_kset->list_lock);
+ }
+ spin_unlock(&ip_kset->list_lock);
+ kobject_put(&ip_die_entry->ip_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct list_head *el, *tmp;
+ struct kset *die_kset;
+
+ die_kset = &adev->ip_top->die_kset;
+ spin_lock(&die_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &die_kset->list) {
+ list_del_init(el);
+ spin_unlock(&die_kset->list_lock);
+ amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
+ spin_lock(&die_kset->list_lock);
+ }
+ spin_unlock(&die_kset->list_lock);
+ kobject_put(&adev->ip_top->die_kset.kobj);
+ kobject_put(&adev->ip_top->kobj);
+}
+
+/* ================================================== */
+
+static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+{
+ uint8_t num_base_address, subrev, variant;
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset;
+ uint16_t ip_offset;
+ uint16_t num_dies;
+ uint16_t num_ips;
+ int hw_ip;
+ int i, j, k;
+ int r;
+
+ r = amdgpu_discovery_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_discovery_init failed\n");
+ return r;
+ }
+
+ adev->gfx.xcc_mask = 0;
+ adev->sdma.sdma_mask = 0;
+ adev->vcn.inst_mask = 0;
+ adev->jpeg.inst_mask = 0;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ if (le16_to_cpu(dhdr->die_id) != i) {
+ DRM_ERROR("invalid die id %d, expected %d\n",
+ le16_to_cpu(dhdr->die_id), i);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("number of hardware IPs on die%d: %d\n",
+ le16_to_cpu(dhdr->die_id), num_ips);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ num_base_address = ip->num_base_address;
+
+ DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)],
+ le16_to_cpu(ip->hw_id),
+ ip->instance_number,
+ ip->major, ip->minor,
+ ip->revision);
+
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
+				/* Bit [5:0]: original revision value
+				 * Bit [7:6]: en/decode capability:
+				 * 0b00 : VCN functions normally
+				 * 0b10 : encode is disabled
+				 * 0b01 : decode is disabled
+				 */
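+				/* Illustrative example (values assumed): a raw
+				 * revision of 0x85 (0b10000101) stores
+				 * vcn_config = 0x85 & 0xc0 = 0x80 (encode
+				 * disabled); the ~0xc0 mask below then restores
+				 * the original revision, 0x05.
+				 */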
+ if (adev->vcn.num_vcn_inst <
+ AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
+ adev->vcn.num_vcn_inst++;
+ adev->vcn.inst_mask |=
+ (1U << ip->instance_number);
+ adev->jpeg.inst_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
+ }
+ ip->revision &= ~0xc0;
+ }
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+ if (adev->sdma.num_instances <
+ AMDGPU_MAX_SDMA_INSTANCES) {
+ adev->sdma.num_instances++;
+ adev->sdma.sdma_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+ adev->sdma.num_instances + 1,
+ AMDGPU_MAX_SDMA_INSTANCES);
+ }
+ }
+
+ if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
+ if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
+ adev->vpe.num_instances++;
+ else
+ dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
+ adev->vpe.num_instances + 1,
+ AMDGPU_MAX_VPE_INSTANCES);
+ }
+
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
+ adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
+
+ if (le16_to_cpu(ip->hw_id) == GC_HWID)
+ adev->gfx.xcc_mask |=
+ (1U << ip->instance_number);
+
+ for (k = 0; k < num_base_address; k++) {
+				/*
+				 * Convert the endianness of base addresses in place,
+				 * so that we don't need to convert them when
+				 * accessing adev->reg_offset[].
+				 */
+ if (ihdr->base_addr_64_bit)
+					/* Truncate the 64-bit base address from IP
+					 * discovery and only store the lower 32-bit
+					 * IP base in reg_offset[]. Bits above 32
+					 * follow an ASIC-specific format, so just
+					 * discard them here and handle them in the
+					 * ASIC-specific code. This way reg_offset[]
+					 * and its related helpers can stay unchanged.
+					 * The base address is in dwords, so clear
+					 * the highest 2 bits before storing.
+					 */
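+					/* E.g. (illustrative) a 64-bit base of
+					 * 0x0000000140001234: lower_32_bits()
+					 * yields 0x40001234, and the 0x3FFFFFFF
+					 * mask stores 0x00001234.
+					 */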
+ ip->base_address[k] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
+ else
+ ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
+ DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
+ }
+
+ for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
+ if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
+ hw_id_map[hw_ip] != 0) {
+ DRM_DEBUG("set register base offset for %s\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)]);
+ adev->reg_offset[hw_ip][ip->instance_number] =
+ ip->base_address;
+					/* Instance support is somewhat inconsistent.
+					 * SDMA is a good example. Sienna Cichlid has
+					 * 4 total SDMA instances, each enumerated
+					 * separately (HWIDs 42, 43, 68, 69). Arcturus
+					 * has 8 total SDMA instances, but they are
+					 * enumerated as multiple instances of the
+					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is
+					 * another example: on most chips there are
+					 * multiple instances with the same HWID.
+					 */
+
+ if (ihdr->version < 3) {
+ subrev = 0;
+ variant = 0;
+ } else {
+ subrev = ip->sub_revision;
+ variant = ip->variant;
+ }
+
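+					/* IP_VERSION_FULL() packs the five
+					 * fields into one integer so versions
+					 * compare as plain numbers (assumed
+					 * encoding: major << 24 | minor << 16 |
+					 * rev << 8 | variant << 4 | subrev).
+					 */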
+ adev->ip_versions[hw_ip]
+ [ip->instance_number] =
+ IP_VERSION_FULL(ip->major,
+ ip->minor,
+ ip->revision,
+ variant,
+ subrev);
+ }
+ }
+
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+ int vcn_harvest_count = 0;
+ int umc_harvest_count = 0;
+
+	/*
+	 * The harvest table does not cover Navi1x and legacy GPUs, so
+	 * read the harvest bit from each IP's data structure instead to
+	 * set the harvest configuration.
+	 */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 ||
+ adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 &&
+ adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 &&
+ adev->pdev->revision == 0xC7))
+ amdgpu_discovery_read_harvest_bit_per_ip(adev,
+ &vcn_harvest_count);
+ } else {
+ amdgpu_discovery_read_from_harvest_table(adev,
+ &vcn_harvest_count,
+ &umc_harvest_count);
+ }
+
+ amdgpu_discovery_harvest_config_quirk(adev);
+
+ if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
+
+	if (umc_harvest_count < adev->gmc.num_umc)
+		adev->gmc.num_umc -= umc_harvest_count;
+}
+
+union gc_info {
+ struct gc_info_v1_0 v1;
+ struct gc_info_v1_1 v1_1;
+ struct gc_info_v1_2 v1_2;
+ struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
+};
+
+static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union gc_info *gc_info;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[GC].offset);
+
+ if (!offset)
+ return 0;
+
+ gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(gc_info->v1.header.version_major)) {
+ case 1:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
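+		/* A gfx10-style WGP packs two CUs, hence the factor of two
+		 * above.
+		 */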
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
+ adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
+ }
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
+ adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
+ adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
+ adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
+ adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
+ adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
+ adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
+ }
+ break;
+ case 2:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled GC info table %d.%d\n",
+ le16_to_cpu(gc_info->v1.header.version_major),
+ le16_to_cpu(gc_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union mall_info {
+ struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
+};
+
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union mall_info *mall_info;
+ u32 u, mall_size_per_umc, m_s_present, half_use;
+ u64 mall_size;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(mall_info->v1.header.version_major)) {
+ case 1:
+ mall_size = 0;
+ mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
+ m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
+ half_use = le32_to_cpu(mall_info->v1.m_half_use);
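+		/* Worked example (illustrative): with 4 UMCs,
+		 * mall_size_per_umc = 1 MiB, m_s_present = 0b0001 and
+		 * m_half_use = 0b0010, the loop below sums
+		 * 2 + 0.5 + 1 + 1 = 4.5 MiB.
+		 */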
+ for (u = 0; u < adev->gmc.num_umc; u++) {
+ if (m_s_present & (1 << u))
+ mall_size += mall_size_per_umc * 2;
+ else if (half_use & (1 << u))
+ mall_size += mall_size_per_umc / 2;
+ else
+ mall_size += mall_size_per_umc;
+ }
+ adev->gmc.mall_size = mall_size;
+ adev->gmc.m_half_use = half_use;
+ break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled MALL info table %d.%d\n",
+ le16_to_cpu(mall_info->v1.header.version_major),
+ le16_to_cpu(mall_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union vcn_info {
+ struct vcn_info_v1_0 v1;
+};
+
+static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union vcn_info *vcn_info;
+ u16 offset;
+ int v;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
+	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but that
+	 * may change in the future with new GPUs, so keep this check for
+	 * defensive purposes.
+	 */
+ if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
+ dev_err(adev->dev, "invalid vcn instances\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
+ case 1:
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
+ * so this won't overflow.
+ */
+ for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
+ adev->vcn.vcn_codec_disable_mask[v] =
+ le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled VCN info table %d.%d\n",
+ le16_to_cpu(vcn_info->v1.header.version_major),
+ le16_to_cpu(vcn_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+{
+	/* Select the common IP block to use based on the GC IP version. */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add common ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+{
+ /* use GC or MMHUB IP version */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 3, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ break;
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
+ break;
+ case IP_VERSION(6, 1, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
+ break;
+ case IP_VERSION(7, 0, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 4):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ break;
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+<<<<<<<
+=======
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+>>>>>>>
+ amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC)
+static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
+{
+ amdgpu_device_set_sriov_virtual_display(adev);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+}
+#endif
+
+static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display) {
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+ return 0;
+ }
+
+ if (!amdgpu_device_has_dc_support(adev))
+ return 0;
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCI_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+#endif
+ return 0;
+}
+
+static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ break;
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 2, 0):
+ /* UVD is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ /* VCE is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, VCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 2, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 3):
+ break;
+ case IP_VERSION(2, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case IP_VERSION(2, 6, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 0, 2):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
+ break;
+ case IP_VERSION(3, 0, 33):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 2):
+ case IP_VERSION(4, 0, 4):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 3):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
+ break;
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ if (amdgpu_mes) {
+ amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
+ adev->enable_mes = true;
+ if (amdgpu_mes_kiq)
+ adev->enable_mes_kiq = true;
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
+ adev->enable_mes = true;
+ adev->enable_mes_kiq = true;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ aqua_vanjaram_init_soc_config(adev);
+ break;
+ default:
+ break;
+ }
+}
+
+static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ if (amdgpu_umsch_mm & 0x1) {
+ amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
+ adev->enable_umsch_mm = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ break;
+ case CHIP_VEGA12:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ break;
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 1;
+ adev->vcn.num_vcn_inst = 1;
+ adev->gmc.num_umc = 2;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ } else {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ }
+ break;
+ case CHIP_VEGA20:
+ vega20_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ adev->sdma.num_instances = 8;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ adev->sdma.num_instances = 5;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ break;
+ }
+
+ amdgpu_discovery_init_soc_config(adev);
+ amdgpu_discovery_sysfs_init(adev);
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ adev->family = AMDGPU_FAMILY_AI;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ adev->family = AMDGPU_FAMILY_RV;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->family = AMDGPU_FAMILY_NV;
+ break;
+ case IP_VERSION(10, 3, 1):
+ adev->family = AMDGPU_FAMILY_VGH;
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case IP_VERSION(10, 3, 3):
+ adev->family = AMDGPU_FAMILY_YC;
+ break;
+ case IP_VERSION(10, 3, 6):
+ adev->family = AMDGPU_FAMILY_GC_10_3_6;
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->family = AMDGPU_FAMILY_GC_10_3_7;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ adev->family = AMDGPU_FAMILY_GC_11_0_0;
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->family = AMDGPU_FAMILY_GC_11_0_1;
+ break;
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->family = AMDGPU_FAMILY_GC_11_5_0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->flags |= AMD_IS_APU;
+ break;
+ default:
+ break;
+ }
+
+ if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
+ adev->gmc.xgmi.supported = true;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
+
+ /* set NBIO version */
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 9, 0):
+ adev->nbio.funcs = &nbio_v7_9_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
+ adev->nbio.funcs = &nbio_v7_11_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 2, 0):
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
+ case IP_VERSION(7, 5, 1):
+ adev->nbio.funcs = &nbio_v7_2_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
+ break;
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 7, 0):
+ case IP_VERSION(7, 7, 1):
+ adev->nbio.funcs = &nbio_v7_7_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
+ break;
+ case IP_VERSION(6, 3, 1):
+ adev->nbio.funcs = &nbif_v6_3_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ adev->hdp.funcs = &hdp_v4_0_funcs;
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 0, 4):
+ case IP_VERSION(5, 2, 0):
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+ break;
+ case IP_VERSION(5, 2, 1):
+ adev->hdp.funcs = &hdp_v5_2_funcs;
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 1, 0):
+ adev->hdp.funcs = &hdp_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ adev->hdp.funcs = &hdp_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(3, 6, 1):
+ case IP_VERSION(3, 6, 2):
+ adev->df.funcs = &df_v3_6_funcs;
+ break;
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 5, 2):
+ adev->df.funcs = &df_v1_7_funcs;
+ break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
+ case IP_VERSION(4, 6, 2):
+ adev->df.funcs = &df_v4_6_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(10, 0, 2):
+ adev->smuio.funcs = &smuio_v9_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ adev->smuio.funcs = &smuio_v11_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 6):
+ case IP_VERSION(11, 0, 10):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 9):
+ case IP_VERSION(13, 0, 10):
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->smuio.funcs = &smuio_v13_0_funcs;
+ break;
+ case IP_VERSION(13, 0, 3):
+ adev->smuio.funcs = &smuio_v13_0_3_funcs;
+		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
+			adev->flags |= AMD_IS_APU;
+ break;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ adev->smuio.funcs = &smuio_v13_0_6_funcs;
+ break;
+ case IP_VERSION(14, 0, 2):
+ adev->smuio.funcs = &smuio_v14_0_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ adev->lsdma.funcs = &lsdma_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ adev->lsdma.funcs = &lsdma_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ r = amdgpu_discovery_set_common_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gmc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ /* For SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+ } else {
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_display_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_sdma_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev)) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_mes_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_vpe_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
diff --git a/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage.1 b/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage.1
new file mode 100644
index 000000000000..66f0a23afec8
--- /dev/null
+++ b/rr-cache/2e1c73681a5100f81b9a3a6b2d3dcf7184f8fe64/preimage.1
@@ -0,0 +1,2784 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_discovery.h"
+#include "soc15_hw_ip.h"
+#include "discovery.h"
+#include "amdgpu_ras.h"
+
+#include "soc15.h"
+#include "gfx_v9_0.h"
+#include "gfx_v9_4_3.h"
+#include "gmc_v9_0.h"
+#include "df_v1_7.h"
+#include "df_v3_6.h"
+#include "df_v4_3.h"
+#include "df_v4_6_2.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
+#include "nbio_v7_9.h"
+#include "nbio_v7_11.h"
+#include "hdp_v4_0.h"
+#include "vega10_ih.h"
+#include "vega20_ih.h"
+#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
+#include "uvd_v7_0.h"
+#include "vce_v4_0.h"
+#include "vcn_v1_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
+#include "smuio_v9_0.h"
+#include "gmc_v10_0.h"
+#include "gmc_v11_0.h"
+#include "gfxhub_v2_0.h"
+#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
+#include "nbio_v4_3.h"
+#include "nbio_v7_2.h"
+#include "nbio_v7_7.h"
+#include "nbif_v6_3_1.h"
+#include "hdp_v5_0.h"
+#include "hdp_v5_2.h"
+#include "hdp_v6_0.h"
+#include "hdp_v7_0.h"
+#include "nv.h"
+#include "soc21.h"
+#include "navi10_ih.h"
+#include "ih_v6_0.h"
+#include "ih_v6_1.h"
+#include "ih_v7_0.h"
+#include "gfx_v10_0.h"
+#include "gfx_v11_0.h"
+#include "sdma_v5_0.h"
+#include "sdma_v5_2.h"
+#include "sdma_v6_0.h"
+#include "lsdma_v6_0.h"
+#include "lsdma_v7_0.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v3_0.h"
+#include "jpeg_v3_0.h"
+#include "vcn_v4_0.h"
+#include "jpeg_v4_0.h"
+#include "vcn_v4_0_3.h"
+#include "jpeg_v4_0_3.h"
+#include "vcn_v4_0_5.h"
+#include "jpeg_v4_0_5.h"
+#include "amdgpu_vkms.h"
+#include "mes_v10_1.h"
+#include "mes_v11_0.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
+#include "smuio_v13_0.h"
+#include "smuio_v13_0_3.h"
+#include "smuio_v13_0_6.h"
+#include "smuio_v14_0_2.h"
+#include "vcn_v5_0_0.h"
+#include "jpeg_v5_0_0.h"
+
+#include "amdgpu_vpe.h"
+
+#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
+MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+
+#define mmIP_DISCOVERY_VERSION 0x16A00
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+#define mmMP0_SMN_C2PMSG_33 0x16061
+#define mmMM_INDEX 0x0
+#define mmMM_INDEX_HI 0x6
+#define mmMM_DATA 0x1
+
+static const char *hw_id_names[HW_ID_MAX] = {
+ [MP1_HWID] = "MP1",
+ [MP2_HWID] = "MP2",
+ [THM_HWID] = "THM",
+ [SMUIO_HWID] = "SMUIO",
+ [FUSE_HWID] = "FUSE",
+ [CLKA_HWID] = "CLKA",
+ [PWR_HWID] = "PWR",
+ [GC_HWID] = "GC",
+ [UVD_HWID] = "UVD",
+ [AUDIO_AZ_HWID] = "AUDIO_AZ",
+ [ACP_HWID] = "ACP",
+ [DCI_HWID] = "DCI",
+ [DMU_HWID] = "DMU",
+ [DCO_HWID] = "DCO",
+ [DIO_HWID] = "DIO",
+ [XDMA_HWID] = "XDMA",
+ [DCEAZ_HWID] = "DCEAZ",
+ [DAZ_HWID] = "DAZ",
+ [SDPMUX_HWID] = "SDPMUX",
+ [NTB_HWID] = "NTB",
+ [IOHC_HWID] = "IOHC",
+ [L2IMU_HWID] = "L2IMU",
+ [VCE_HWID] = "VCE",
+ [MMHUB_HWID] = "MMHUB",
+ [ATHUB_HWID] = "ATHUB",
+ [DBGU_NBIO_HWID] = "DBGU_NBIO",
+ [DFX_HWID] = "DFX",
+ [DBGU0_HWID] = "DBGU0",
+ [DBGU1_HWID] = "DBGU1",
+ [OSSSYS_HWID] = "OSSSYS",
+ [HDP_HWID] = "HDP",
+ [SDMA0_HWID] = "SDMA0",
+ [SDMA1_HWID] = "SDMA1",
+ [SDMA2_HWID] = "SDMA2",
+ [SDMA3_HWID] = "SDMA3",
+ [LSDMA_HWID] = "LSDMA",
+ [ISP_HWID] = "ISP",
+ [DBGU_IO_HWID] = "DBGU_IO",
+ [DF_HWID] = "DF",
+ [CLKB_HWID] = "CLKB",
+ [FCH_HWID] = "FCH",
+ [DFX_DAP_HWID] = "DFX_DAP",
+ [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
+ [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
+ [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
+ [L1IMU3_HWID] = "L1IMU3",
+ [L1IMU4_HWID] = "L1IMU4",
+ [L1IMU5_HWID] = "L1IMU5",
+ [L1IMU6_HWID] = "L1IMU6",
+ [L1IMU7_HWID] = "L1IMU7",
+ [L1IMU8_HWID] = "L1IMU8",
+ [L1IMU9_HWID] = "L1IMU9",
+ [L1IMU10_HWID] = "L1IMU10",
+ [L1IMU11_HWID] = "L1IMU11",
+ [L1IMU12_HWID] = "L1IMU12",
+ [L1IMU13_HWID] = "L1IMU13",
+ [L1IMU14_HWID] = "L1IMU14",
+ [L1IMU15_HWID] = "L1IMU15",
+ [WAFLC_HWID] = "WAFLC",
+ [FCH_USB_PD_HWID] = "FCH_USB_PD",
+ [PCIE_HWID] = "PCIE",
+ [PCS_HWID] = "PCS",
+ [DDCL_HWID] = "DDCL",
+ [SST_HWID] = "SST",
+ [IOAGR_HWID] = "IOAGR",
+ [NBIF_HWID] = "NBIF",
+ [IOAPIC_HWID] = "IOAPIC",
+ [SYSTEMHUB_HWID] = "SYSTEMHUB",
+ [NTBCCP_HWID] = "NTBCCP",
+ [UMC_HWID] = "UMC",
+ [SATA_HWID] = "SATA",
+ [USB_HWID] = "USB",
+ [CCXSEC_HWID] = "CCXSEC",
+ [XGMI_HWID] = "XGMI",
+ [XGBE_HWID] = "XGBE",
+ [MP0_HWID] = "MP0",
+ [VPE_HWID] = "VPE",
+};
+
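+/* Map the driver-internal HWIP enumeration (used to index
+ * adev->reg_offset and adev->ip_versions) onto the HWID values used by
+ * the discovery table; entries left zero have no discovery counterpart.
+ */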
+static int hw_id_map[MAX_HWIP] = {
+ [GC_HWIP] = GC_HWID,
+ [HDP_HWIP] = HDP_HWID,
+ [SDMA0_HWIP] = SDMA0_HWID,
+ [SDMA1_HWIP] = SDMA1_HWID,
+ [SDMA2_HWIP] = SDMA2_HWID,
+ [SDMA3_HWIP] = SDMA3_HWID,
+ [LSDMA_HWIP] = LSDMA_HWID,
+ [MMHUB_HWIP] = MMHUB_HWID,
+ [ATHUB_HWIP] = ATHUB_HWID,
+ [NBIO_HWIP] = NBIF_HWID,
+ [MP0_HWIP] = MP0_HWID,
+ [MP1_HWIP] = MP1_HWID,
+ [UVD_HWIP] = UVD_HWID,
+ [VCE_HWIP] = VCE_HWID,
+ [DF_HWIP] = DF_HWID,
+ [DCE_HWIP] = DMU_HWID,
+ [OSSSYS_HWIP] = OSSSYS_HWID,
+ [SMUIO_HWIP] = SMUIO_HWID,
+ [PWR_HWIP] = PWR_HWID,
+ [NBIF_HWIP] = NBIF_HWID,
+ [THM_HWIP] = THM_HWID,
+ [CLK_HWIP] = CLKA_HWID,
+ [UMC_HWIP] = UMC_HWID,
+ [XGMI_HWIP] = XGMI_HWID,
+ [DCI_HWIP] = DCI_HWID,
+ [PCIE_HWIP] = PCIE_HWID,
+ [VPE_HWIP] = VPE_HWID,
+};
+
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ u64 tmr_offset, tmr_size, pos;
+ void *discv_regn;
+ int ret;
+
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+
+ pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
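+/* On dGPUs the discovery binary sits in a reserved region at the top of
+ * VRAM, DISCOVERY_TMR_OFFSET bytes below the end. When no VRAM size is
+ * reported (e.g. some APU or early-boot configurations), fall back to
+ * the ACPI-described TMR carve-out in system memory.
+ */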
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+{
+ uint64_t vram_size;
+ u32 msg;
+ int i, ret = 0;
+
+ /* IFWI init can take up to a second on some dGPUs, but is generally
+ * in the 60-100ms range. It starts as soon as the device gets power,
+ * so by the time the OS loads it has normally long since completed.
+ * However, when a card is hotplugged (e.g. via USB4), we need to wait
+ * for it to finish. Once the C2PMSG register is updated, we can
+ * continue.
+ */
+
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ usleep_range(1000, 1100);
+ }
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->mman.discovery_tmr_size, false);
+ } else {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ }
+
+ return ret;
+}
+
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+{
+ const struct firmware *fw;
+ const char *fw_name;
+ int r;
+
+ switch (amdgpu_discovery) {
+ case 2:
+ fw_name = FIRMWARE_IP_DISCOVERY;
+ break;
+ default:
+ dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
+ return -EINVAL;
+ }
+
+ r = request_firmware(&fw, fw_name, adev->dev);
+ if (r) {
+ dev_err(adev->dev, "can't load firmware \"%s\"\n",
+ fw_name);
+ return r;
+ }
+
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
+ release_firmware(fw);
+
+ return 0;
+}
+
+static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
+{
+ uint16_t checksum = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ checksum += data[i];
+
+ return checksum;
+}
+
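+/* The discovery tables use a plain 16-bit byte sum (no CRC): e.g. the
+ * data { 0x01, 0x02, 0xFF } verifies against an expected checksum of
+ * 0x0102.
+ */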
+static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
+ uint16_t expected)
+{
+ return amdgpu_discovery_calculate_checksum(data, size) == expected;
+}
+
+static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
+{
+ struct binary_header *bhdr;
+ bhdr = (struct binary_header *)binary;
+
+ return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
+}
+
+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+ /*
+ * So far, apply this quirk only on those Navy Flounder boards which
+ * have a bad harvest table of VCN config.
+ */
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
+ switch (adev->pdev->revision) {
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC5:
+ case 0xC7:
+ case 0xCF:
+ case 0xDF:
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
+{
+ struct table_info *info;
+ struct binary_header *bhdr;
+ uint16_t offset;
+ uint16_t size;
+ uint16_t checksum;
+ int r;
+
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+ if (!adev->mman.discovery_bin)
+ return -ENOMEM;
+
+ /* Read from file if it is the preferred option */
+ if (amdgpu_discovery == 2) {
+ dev_info(adev->dev, "use ip discovery information from file");
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
+
+ if (r) {
+ dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ } else {
+ r = amdgpu_discovery_read_binary_from_mem(
+ adev, adev->mman.discovery_bin);
+ if (r)
+ goto out;
+ }
+
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_err(adev->dev,
+ "invalid ip discovery binary signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+
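+ /* The binary checksum covers everything after the binary_checksum
+ * field itself, up to binary_size.
+ */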
+ offset = offsetof(struct binary_header, binary_checksum) +
+ sizeof(bhdr->binary_checksum);
+ size = le16_to_cpu(bhdr->binary_size) - offset;
+ checksum = le16_to_cpu(bhdr->binary_checksum);
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ size, checksum)) {
+ dev_err(adev->dev, "invalid ip discovery binary checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ info = &bhdr->table_list[IP_DISCOVERY];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct ip_discovery_header *ihdr =
+ (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery data table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le16_to_cpu(ihdr->size), checksum)) {
+ dev_err(adev->dev, "invalid ip discovery data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[GC];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct gpu_info_header *ghdr =
+ (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery gc table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(ghdr->size), checksum)) {
+ dev_err(adev->dev, "invalid gc data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[HARVEST_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct harvest_info_header *hhdr =
+ (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
+ dev_err(adev->dev, "invalid harvest data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[VCN_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct vcn_info_header *vhdr =
+ (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery vcn table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid vcn data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[MALL_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (0 && offset) {
+ struct mall_info_header *mhdr =
+ (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery mall table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid mall data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+ if ((amdgpu_discovery != 2) &&
+ (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+ amdgpu_ras_query_boot_status(adev, 4);
+ return r;
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
+
+void amdgpu_discovery_fini(struct amdgpu_device *adev)
+{
+ amdgpu_discovery_sysfs_fini(adev);
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+}
+
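+/* Each die is a packed array of variable-length struct ip_v4 records: a
+ * fixed header followed by num_base_address 32-bit (or 64-bit, when
+ * ihdr->base_addr_64_bit is set) base addresses, which is why all the
+ * walkers below advance ip_offset with struct_size().
+ */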
+static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
+{
+ if (ip->instance_number >= HWIP_MAX_INSTANCE) {
+ DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
+ ip->instance_number);
+ return -EINVAL;
+ }
+ if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
+ DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
+ le16_to_cpu(ip->hw_id));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset, ip_offset, num_dies, num_ips;
+ int i, j;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ /* scan harvest bit of all IP data structures */
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ if (le16_to_cpu(ip->variant) == 1) {
+ switch (le16_to_cpu(ip->hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ if (ip->instance_number == 0) {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ } else {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ }
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+}
+
+static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count,
+ uint32_t *umc_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ u16 offset;
+ int i;
+ uint32_t umc_harvest_config = 0;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
+
+ if (!offset) {
+ dev_err(adev->dev, "invalid harvest table offset\n");
+ return;
+ }
+
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+
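+ /* The harvest table holds up to 32 entries; a zero hw_id terminates
+ * the list early.
+ */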
+ for (i = 0; i < 32; i++) {
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ adev->vcn.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+ adev->jpeg.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+
+ adev->vcn.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ adev->jpeg.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
+ (*umc_harvest_count)++;
+ break;
+ case GC_HWID:
+ adev->gfx.xcc_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case SDMA0_HWID:
+ adev->sdma.sdma_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ default:
+ break;
+ }
+ }
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
+}
+
+/* ================================================== */
+
+struct ip_hw_instance {
+ struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
+
+ int hw_id;
+ u8 num_instance;
+ u8 major, minor, revision;
+ u8 harvest;
+
+ int num_base_addresses;
+ u32 base_addr[] __counted_by(num_base_addresses);
+};
+
+struct ip_hw_id {
+ struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
+ int hw_id;
+};
+
+struct ip_die_entry {
+ struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
+ u16 num_ips;
+};
+
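+/* The resulting tree, for a hypothetical single-die ASIC (the path is
+ * illustrative; named entries like "GC" are convenience symlinks to the
+ * numeric hw_id directories):
+ *
+ *   /sys/class/drm/card0/device/ip_discovery/die/0/GC/0/{hw_id,
+ *       num_instance, major, minor, revision, harvest,
+ *       num_base_addresses, base_addr}
+ */
+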
+/* -------------------------------------------------- */
+
+struct ip_hw_instance_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
+};
+
+static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
+}
+
+static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
+}
+
+static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
+}
+
+static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
+}
+
+static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
+}
+
+static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
+}
+
+static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
+}
+
+static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ ssize_t res, at;
+ int ii;
+
+ for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
+ /* Each entry emits "0x%08X\n", i.e. 11 characters plus the
+ * terminating NUL; the check keeps at + 12 within PAGE_SIZE.
+ */
+ if (at + 12 > PAGE_SIZE)
+ break;
+ res = sysfs_emit_at(buf, at, "0x%08X\n",
+ ip_hw_instance->base_addr[ii]);
+ if (res <= 0)
+ break;
+ at += res;
+ }
+
+ return res < 0 ? res : at;
+}
+
+static struct ip_hw_instance_attr ip_hw_attr[] = {
+ __ATTR_RO(hw_id),
+ __ATTR_RO(num_instance),
+ __ATTR_RO(major),
+ __ATTR_RO(minor),
+ __ATTR_RO(revision),
+ __ATTR_RO(harvest),
+ __ATTR_RO(num_base_addresses),
+ __ATTR_RO(base_addr),
+};
+
+static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
+ATTRIBUTE_GROUPS(ip_hw_instance);
+
+#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
+#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
+
+static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+ struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
+
+ if (!ip_hw_attr->show)
+ return -EIO;
+
+ return ip_hw_attr->show(ip_hw_instance, buf);
+}
+
+static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
+ .show = ip_hw_instance_attr_show,
+};
+
+static void ip_hw_instance_release(struct kobject *kobj)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+
+ kfree(ip_hw_instance);
+}
+
+static const struct kobj_type ip_hw_instance_ktype = {
+ .release = ip_hw_instance_release,
+ .sysfs_ops = &ip_hw_instance_sysfs_ops,
+ .default_groups = ip_hw_instance_groups,
+};
+
+/* -------------------------------------------------- */
+
+#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
+
+static void ip_hw_id_release(struct kobject *kobj)
+{
+ struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
+
+ if (!list_empty(&ip_hw_id->hw_id_kset.list))
+ DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
+ kfree(ip_hw_id);
+}
+
+static const struct kobj_type ip_hw_id_ktype = {
+ .release = ip_hw_id_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* -------------------------------------------------- */
+
+static void die_kobj_release(struct kobject *kobj);
+static void ip_disc_release(struct kobject *kobj);
+
+struct ip_die_entry_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
+};
+
+#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
+
+static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
+}
+
+/* If there are more ip_die_entry attrs, other than the number of IPs,
+ * we can make this into an array of attrs, and then initialize
+ * ip_die_entry_attrs in a loop.
+ */
+static struct ip_die_entry_attribute num_ips_attr =
+ __ATTR_RO(num_ips);
+
+static struct attribute *ip_die_entry_attrs[] = {
+ &num_ips_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
+
+#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
+
+static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!ip_die_entry_attr->show)
+ return -EIO;
+
+ return ip_die_entry_attr->show(ip_die_entry, buf);
+}
+
+static void ip_die_entry_release(struct kobject *kobj)
+{
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!list_empty(&ip_die_entry->ip_kset.list))
+ DRM_ERROR("ip_die_entry->ip_kset is not empty");
+ kfree(ip_die_entry);
+}
+
+static const struct sysfs_ops ip_die_entry_sysfs_ops = {
+ .show = ip_die_entry_attr_show,
+};
+
+static const struct kobj_type ip_die_entry_ktype = {
+ .release = ip_die_entry_release,
+ .sysfs_ops = &ip_die_entry_sysfs_ops,
+ .default_groups = ip_die_entry_groups,
+};
+
+static const struct kobj_type die_kobj_ktype = {
+ .release = die_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static const struct kobj_type ip_discovery_ktype = {
+ .release = ip_disc_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+struct ip_discovery_top {
+ struct kobject kobj; /* ip_discovery/ */
+ struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
+ struct amdgpu_device *adev;
+};
+
+static void die_kobj_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
+ struct ip_discovery_top,
+ die_kset);
+ if (!list_empty(&ip_top->die_kset.list))
+ DRM_ERROR("ip_top->die_kset is not empty");
+}
+
+static void ip_disc_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
+ kobj);
+ struct amdgpu_device *adev = ip_top->adev;
+
+ adev->ip_top = NULL;
+ kfree(ip_top);
+}
+
+static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+ uint16_t hw_id, uint8_t inst)
+{
+ uint8_t harvest = 0;
+
+ /* Until a uniform way is figured out, derive the mask from the hw_id */
+ switch (hw_id) {
+ case VCN_HWID:
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ break;
+ case DMU_HWID:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+ harvest = 0x1;
+ break;
+ case UMC_HWID:
+ /* TODO: UMC harvest info needs separate parsing; ignore it for now. */
+ break;
+ case GC_HWID:
+ harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
+ break;
+ case SDMA0_HWID:
+ harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
+ break;
+ default:
+ break;
+ }
+
+ return harvest;
+}
+
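+/* Walks the die's IP records once per possible HW ID; the per-hw_id
+ * kset is created lazily on the first matching record, and each
+ * instance is then added beneath it.
+ */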
+static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
+ struct ip_die_entry *ip_die_entry,
+ const size_t _ip_offset, const int num_ips,
+ bool reg_base_64)
+{
+ int ii, jj, kk, res;
+
+ DRM_DEBUG("num_ips:%d", num_ips);
+
+ /* Find all IPs of a given HW ID, and add their instance to
+ * #die/#hw_id/#instance/<attributes>
+ */
+ for (ii = 0; ii < HW_ID_MAX; ii++) {
+ struct ip_hw_id *ip_hw_id = NULL;
+ size_t ip_offset = _ip_offset;
+
+ for (jj = 0; jj < num_ips; jj++) {
+ struct ip_v4 *ip;
+ struct ip_hw_instance *ip_hw_instance;
+
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ if (amdgpu_discovery_validate_ip(ip) ||
+ le16_to_cpu(ip->hw_id) != ii)
+ goto next_ip;
+
+ DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
+
+ /* We have a hw_id match; register the hw
+ * block if not yet registered.
+ */
+ if (!ip_hw_id) {
+ ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
+ if (!ip_hw_id)
+ return -ENOMEM;
+ ip_hw_id->hw_id = ii;
+
+ kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
+ ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
+ ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
+ res = kset_register(&ip_hw_id->hw_id_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_hw_id kset");
+ kfree(ip_hw_id);
+ return res;
+ }
+ if (hw_id_names[ii]) {
+ res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
+ &ip_hw_id->hw_id_kset.kobj,
+ hw_id_names[ii]);
+ if (res) {
+ DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
+ hw_id_names[ii],
+ kobject_name(&ip_die_entry->ip_kset.kobj));
+ }
+ }
+ }
+
+ /* Now register its instance.
+ */
+ ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
+ base_addr,
+ ip->num_base_address),
+ GFP_KERNEL);
+ if (!ip_hw_instance) {
+ DRM_ERROR("no memory for ip_hw_instance");
+ return -ENOMEM;
+ }
+ ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
+ ip_hw_instance->num_instance = ip->instance_number;
+ ip_hw_instance->major = ip->major;
+ ip_hw_instance->minor = ip->minor;
+ ip_hw_instance->revision = ip->revision;
+ ip_hw_instance->harvest =
+ amdgpu_discovery_get_harvest_info(
+ adev, ip_hw_instance->hw_id,
+ ip_hw_instance->num_instance);
+ ip_hw_instance->num_base_addresses = ip->num_base_address;
+
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
+ if (reg_base_64)
+ ip_hw_instance->base_addr[kk] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
+ else
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+ }
+
+ kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
+ ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
+ res = kobject_add(&ip_hw_instance->kobj, NULL,
+ "%d", ip_hw_instance->num_instance);
+next_ip:
+ if (reg_base_64)
+ ip_offset += struct_size(ip, base_address_64,
+ ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct kset *die_kset = &adev->ip_top->die_kset;
+ u16 num_dies, die_offset, num_ips;
+ size_t ip_offset;
+ int ii, res;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (ii = 0; ii < num_dies; ii++) {
+ struct ip_die_entry *ip_die_entry;
+
+ die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ /* Add the die to the kset.
+ *
+ * dhdr->die_id == ii, which was checked in
+ * amdgpu_discovery_reg_base_init().
+ */
+
+ ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
+ if (!ip_die_entry)
+ return -ENOMEM;
+
+ ip_die_entry->num_ips = num_ips;
+
+ kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
+ ip_die_entry->ip_kset.kobj.kset = die_kset;
+ ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
+ res = kset_register(&ip_die_entry->ip_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_die_entry kset");
+ kfree(ip_die_entry);
+ return res;
+ }
+
+ amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
+{
+ struct kset *die_kset;
+ int res, ii;
+
+ if (!adev->mman.discovery_bin)
+ return -EINVAL;
+
+ adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
+ if (!adev->ip_top)
+ return -ENOMEM;
+
+ adev->ip_top->adev = adev;
+
+ res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ &adev->dev->kobj, "ip_discovery");
+ if (res) {
+ DRM_ERROR("Couldn't init and add ip_discovery/");
+ goto Err;
+ }
+
+ die_kset = &adev->ip_top->die_kset;
+ kobject_set_name(&die_kset->kobj, "%s", "die");
+ die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.ktype = &die_kobj_ktype;
+ res = kset_register(&adev->ip_top->die_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register die_kset");
+ goto Err;
+ }
+
+ for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
+ ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
+ ip_hw_instance_attrs[ii] = NULL;
+
+ res = amdgpu_discovery_sysfs_recurse(adev);
+
+ return res;
+Err:
+ kobject_put(&adev->ip_top->kobj);
+ return res;
+}
+
+/* -------------------------------------------------- */
+
+#define list_to_kobj(el) container_of(el, struct kobject, entry)
+
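+/* The kset list lock is dropped around each kobject_put() below since
+ * the kobject release path may sleep (sysfs teardown) and must not run
+ * under a spinlock.
+ */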
+static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
+{
+ struct list_head *el, *tmp;
+ struct kset *hw_id_kset;
+
+ hw_id_kset = &ip_hw_id->hw_id_kset;
+ spin_lock(&hw_id_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
+ list_del_init(el);
+ spin_unlock(&hw_id_kset->list_lock);
+ /* kobject is embedded in ip_hw_instance */
+ kobject_put(list_to_kobj(el));
+ spin_lock(&hw_id_kset->list_lock);
+ }
+ spin_unlock(&hw_id_kset->list_lock);
+ kobject_put(&ip_hw_id->hw_id_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
+{
+ struct list_head *el, *tmp;
+ struct kset *ip_kset;
+
+ ip_kset = &ip_die_entry->ip_kset;
+ spin_lock(&ip_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &ip_kset->list) {
+ list_del_init(el);
+ spin_unlock(&ip_kset->list_lock);
+ amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
+ spin_lock(&ip_kset->list_lock);
+ }
+ spin_unlock(&ip_kset->list_lock);
+ kobject_put(&ip_die_entry->ip_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct list_head *el, *tmp;
+ struct kset *die_kset;
+
+ die_kset = &adev->ip_top->die_kset;
+ spin_lock(&die_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &die_kset->list) {
+ list_del_init(el);
+ spin_unlock(&die_kset->list_lock);
+ amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
+ spin_lock(&die_kset->list_lock);
+ }
+ spin_unlock(&die_kset->list_lock);
+ kobject_put(&adev->ip_top->die_kset.kobj);
+ kobject_put(&adev->ip_top->kobj);
+}
+
+/* ================================================== */
+
+static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+{
+ uint8_t num_base_address, subrev, variant;
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset;
+ uint16_t ip_offset;
+ uint16_t num_dies;
+ uint16_t num_ips;
+ int hw_ip;
+ int i, j, k;
+ int r;
+
+ r = amdgpu_discovery_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_discovery_init failed\n");
+ return r;
+ }
+
+ adev->gfx.xcc_mask = 0;
+ adev->sdma.sdma_mask = 0;
+ adev->vcn.inst_mask = 0;
+ adev->jpeg.inst_mask = 0;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ if (le16_to_cpu(dhdr->die_id) != i) {
+ DRM_ERROR("invalid die id %d, expected %d\n",
+ le16_to_cpu(dhdr->die_id), i);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("number of hardware IPs on die%d: %d\n",
+ le16_to_cpu(dhdr->die_id), num_ips);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ num_base_address = ip->num_base_address;
+
+ DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)],
+ le16_to_cpu(ip->hw_id),
+ ip->instance_number,
+ ip->major, ip->minor,
+ ip->revision);
+
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
+ /* Bit [5:0]: original revision value
+ * Bit [7:6]: en/decode capability:
+ * 0b00 : VCN functions normally
+ * 0b10 : encode is disabled
+ * 0b01 : decode is disabled
+ */
+ if (adev->vcn.num_vcn_inst <
+ AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
+ adev->vcn.num_vcn_inst++;
+ adev->vcn.inst_mask |=
+ (1U << ip->instance_number);
+ adev->jpeg.inst_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
+ }
+ ip->revision &= ~0xc0;
+ }
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+ if (adev->sdma.num_instances <
+ AMDGPU_MAX_SDMA_INSTANCES) {
+ adev->sdma.num_instances++;
+ adev->sdma.sdma_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+ adev->sdma.num_instances + 1,
+ AMDGPU_MAX_SDMA_INSTANCES);
+ }
+ }
+
+ if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
+ if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
+ adev->vpe.num_instances++;
+ else
+ dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
+ adev->vpe.num_instances + 1,
+ AMDGPU_MAX_VPE_INSTANCES);
+ }
+
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
+ adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
+
+ if (le16_to_cpu(ip->hw_id) == GC_HWID)
+ adev->gfx.xcc_mask |=
+ (1U << ip->instance_number);
+
+ for (k = 0; k < num_base_address; k++) {
+ /*
+ * convert the endianness of base addresses in place,
+ * so that we don't need to convert them when accessing adev->reg_offset.
+ */
+ if (ihdr->base_addr_64_bit)
+ /* Truncate the 64-bit base address from ip discovery
+ * and only store the lower 32-bit ip base in reg_offset[].
+ * Bits above 32 follow an ASIC-specific format, thus just
+ * discard them and handle them within the specific ASIC.
+ * This way reg_offset[] and related helpers can
+ * stay unchanged.
+ * The base address is in dwords, thus clear the
+ * highest 2 bits before storing.
+ */
+ ip->base_address[k] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
+ else
+ ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
+ DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
+ }
+
+ for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
+ if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
+ hw_id_map[hw_ip] != 0) {
+ DRM_DEBUG("set register base offset for %s\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)]);
+ adev->reg_offset[hw_ip][ip->instance_number] =
+ ip->base_address;
+ /* Instance support is somewhat inconsistent.
+ * SDMA is a good example. Sienna cichlid has 4 total
+ * SDMA instances, each enumerated separately (HWIDs
+ * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
+ * but they are enumerated as multiple instances of the
+ * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
+ * example. On most chips there are multiple instances
+ * with the same HWID.
+ */
+
+ if (ihdr->version < 3) {
+ subrev = 0;
+ variant = 0;
+ } else {
+ subrev = ip->sub_revision;
+ variant = ip->variant;
+ }
+
+ adev->ip_versions[hw_ip]
+ [ip->instance_number] =
+ IP_VERSION_FULL(ip->major,
+ ip->minor,
+ ip->revision,
+ variant,
+ subrev);
+ }
+ }
+
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+ int vcn_harvest_count = 0;
+ int umc_harvest_count = 0;
+
+ /*
+ * Harvest table does not fit Navi1x and legacy GPUs,
+ * so read harvest bit per IP data structure to set
+ * harvest configuration.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) {
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 ||
+ adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 &&
+ adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 &&
+ adev->pdev->revision == 0xC7))
+ amdgpu_discovery_read_harvest_bit_per_ip(adev,
+ &vcn_harvest_count);
+ } else {
+ amdgpu_discovery_read_from_harvest_table(adev,
+ &vcn_harvest_count,
+ &umc_harvest_count);
+ }
+
+ amdgpu_discovery_harvest_config_quirk(adev);
+
+ if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
+
+ if (umc_harvest_count < adev->gmc.num_umc)
+ adev->gmc.num_umc -= umc_harvest_count;
+}
+
+union gc_info {
+ struct gc_info_v1_0 v1;
+ struct gc_info_v1_1 v1_1;
+ struct gc_info_v1_2 v1_2;
+ struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
+};
+
+static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union gc_info *gc_info;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[GC].offset);
+
+ if (!offset)
+ return 0;
+
+ gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(gc_info->v1.header.version_major)) {
+ case 1:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
+ adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
+ }
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
+ adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
+ adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
+ adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
+ adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
+ adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
+ adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
+ }
+ break;
+ case 2:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled GC info table %d.%d\n",
+ le16_to_cpu(gc_info->v1.header.version_major),
+ le16_to_cpu(gc_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union mall_info {
+ struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
+};
+
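+/* v1 MALL sizing, per UMC channel u: count mall_size_per_m twice when
+ * the m_s_present bit is set, half when m_half_use is set, and once
+ * otherwise. E.g. with 4 UMCs at 2 MB each and m_half_use = 0b0001 the
+ * total is 1 + 2 + 2 + 2 = 7 MB (illustrative numbers).
+ */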
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union mall_info *mall_info;
+ u32 u, mall_size_per_umc, m_s_present, half_use;
+ u64 mall_size;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(mall_info->v1.header.version_major)) {
+ case 1:
+ mall_size = 0;
+ mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
+ m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
+ half_use = le32_to_cpu(mall_info->v1.m_half_use);
+ for (u = 0; u < adev->gmc.num_umc; u++) {
+ if (m_s_present & (1 << u))
+ mall_size += mall_size_per_umc * 2;
+ else if (half_use & (1 << u))
+ mall_size += mall_size_per_umc / 2;
+ else
+ mall_size += mall_size_per_umc;
+ }
+ adev->gmc.mall_size = mall_size;
+ adev->gmc.m_half_use = half_use;
+ break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled MALL info table %d.%d\n",
+ le16_to_cpu(mall_info->v1.header.version_major),
+ le16_to_cpu(mall_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union vcn_info {
+ struct vcn_info_v1_0 v1;
+};
+
+static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union vcn_info *vcn_info;
+ u16 offset;
+ int v;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
+ * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but that
+ * may change in the future with new GPUs, so keep this check for
+ * defensive purposes.
+ */
+ if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
+ dev_err(adev->dev, "invalid vcn instances\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
+ case 1:
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
+ * so this won't overflow.
+ */
+ for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
+ adev->vcn.vcn_codec_disable_mask[v] =
+ le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled VCN info table %d.%d\n",
+ le16_to_cpu(vcn_info->v1.header.version_major),
+ le16_to_cpu(vcn_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+{
+ /* Key the common (SOC) IP block off the GC IP version. */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add common ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+{
+ /* use GC or MMHUB IP version */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 3, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ break;
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
+ break;
+ case IP_VERSION(6, 1, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
+ break;
+ case IP_VERSION(7, 0, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 4):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ break;
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+<<<<<<<
+=======
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+>>>>>>>
+ amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC)
+static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
+{
+ amdgpu_device_set_sriov_virtual_display(adev);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+}
+#endif
+
+static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display) {
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+ return 0;
+ }
+
+ if (!amdgpu_device_has_dc_support(adev))
+ return 0;
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCI_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+#endif
+ return 0;
+}
+
+static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(9, 4, 3):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ break;
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 2, 0):
+ /* UVD is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ /* VCE is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, VCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 2, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 3):
+ break;
+ case IP_VERSION(2, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case IP_VERSION(2, 6, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 0, 2):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
+ break;
+ case IP_VERSION(3, 0, 33):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 2):
+ case IP_VERSION(4, 0, 4):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 3):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
+ break;
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+{
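+	/* MES (MicroEngine Scheduler): opt-in via the amdgpu_mes module parameter on gfx10 (KIQ via amdgpu_mes_kiq); enabled unconditionally, with KIQ, on gfx11 */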
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ if (amdgpu_mes) {
+ amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
+ adev->enable_mes = true;
+ if (amdgpu_mes_kiq)
+ adev->enable_mes_kiq = true;
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
+ adev->enable_mes = true;
+ adev->enable_mes_kiq = true;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ aqua_vanjaram_init_soc_config(adev);
+ break;
+ default:
+ break;
+ }
+}
+
+static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ if (amdgpu_umsch_mm & 0x1) {
+ amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
+ adev->enable_umsch_mm = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
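+	/*
+	 * ASICs without an IP discovery table (vega10 family through
+	 * aldebaran) get their IP versions hardcoded here; newer parts
+	 * read them from the discovery binary in the default branch.
+	 */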
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ break;
+ case CHIP_VEGA12:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ break;
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 1;
+ adev->vcn.num_vcn_inst = 1;
+ adev->gmc.num_umc = 2;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ } else {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ }
+ break;
+ case CHIP_VEGA20:
+ vega20_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ adev->sdma.num_instances = 8;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ adev->sdma.num_instances = 5;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ break;
+ }
+
+ amdgpu_discovery_init_soc_config(adev);
+ amdgpu_discovery_sysfs_init(adev);
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ adev->family = AMDGPU_FAMILY_AI;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ adev->family = AMDGPU_FAMILY_RV;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->family = AMDGPU_FAMILY_NV;
+ break;
+ case IP_VERSION(10, 3, 1):
+ adev->family = AMDGPU_FAMILY_VGH;
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case IP_VERSION(10, 3, 3):
+ adev->family = AMDGPU_FAMILY_YC;
+ break;
+ case IP_VERSION(10, 3, 6):
+ adev->family = AMDGPU_FAMILY_GC_10_3_6;
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->family = AMDGPU_FAMILY_GC_10_3_7;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ adev->family = AMDGPU_FAMILY_GC_11_0_0;
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->family = AMDGPU_FAMILY_GC_11_0_1;
+ break;
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->family = AMDGPU_FAMILY_GC_11_5_0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->flags |= AMD_IS_APU;
+ break;
+ default:
+ break;
+ }
+
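+	/* XGMI 4.8.0 marks the inter-GPU link as supported; GC 9.4.3 parts then get their XGMI version forced to 6.4.0 */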
+ if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
+ adev->gmc.xgmi.supported = true;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
+
+ /* set NBIO version */
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 9, 0):
+ adev->nbio.funcs = &nbio_v7_9_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
+ adev->nbio.funcs = &nbio_v7_11_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 2, 0):
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
+ case IP_VERSION(7, 5, 1):
+ adev->nbio.funcs = &nbio_v7_2_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
+ break;
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 7, 0):
+ case IP_VERSION(7, 7, 1):
+ adev->nbio.funcs = &nbio_v7_7_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
+ break;
+ case IP_VERSION(6, 3, 1):
+ adev->nbio.funcs = &nbif_v6_3_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ adev->hdp.funcs = &hdp_v4_0_funcs;
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 0, 4):
+ case IP_VERSION(5, 2, 0):
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+ break;
+ case IP_VERSION(5, 2, 1):
+ adev->hdp.funcs = &hdp_v5_2_funcs;
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 1, 0):
+ adev->hdp.funcs = &hdp_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ adev->hdp.funcs = &hdp_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(3, 6, 1):
+ case IP_VERSION(3, 6, 2):
+ adev->df.funcs = &df_v3_6_funcs;
+ break;
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 5, 2):
+ adev->df.funcs = &df_v1_7_funcs;
+ break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
+ case IP_VERSION(4, 6, 2):
+ adev->df.funcs = &df_v4_6_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(10, 0, 2):
+ adev->smuio.funcs = &smuio_v9_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ adev->smuio.funcs = &smuio_v11_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 6):
+ case IP_VERSION(11, 0, 10):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 9):
+ case IP_VERSION(13, 0, 10):
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->smuio.funcs = &smuio_v13_0_funcs;
+ break;
+ case IP_VERSION(13, 0, 3):
+ adev->smuio.funcs = &smuio_v13_0_3_funcs;
+		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
+			adev->flags |= AMD_IS_APU;
+ break;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ adev->smuio.funcs = &smuio_v13_0_6_funcs;
+ break;
+ case IP_VERSION(14, 0, 2):
+ adev->smuio.funcs = &smuio_v14_0_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ adev->lsdma.funcs = &lsdma_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ adev->lsdma.funcs = &lsdma_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ r = amdgpu_discovery_set_common_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gmc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ /* For SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+ } else {
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_display_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_sdma_ip_blocks(adev);
+ if (r)
+ return r;
+
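+	/* with direct or RLC-backdoor-auto firmware loading, the SMU block is added here, after gfx/sdma, rather than right after PSP */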
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev)) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_mes_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_vpe_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
diff --git a/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/postimage b/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/postimage
new file mode 100644
index 000000000000..3bc9662fbd28
--- /dev/null
+++ b/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/postimage
@@ -0,0 +1,1844 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+
+#define SWSMU_CODE_LAYER_L3
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v14_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+
+#include "asic_reg/mp/mp_14_0_2_offset.h"
+#include "asic_reg/mp/mp_14_0_2_sh_mask.h"
+
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+
+#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
+
+int smu_v14_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ char fw_name[30];
+ char ucode_prefix[30];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+	/* no need to load the SMU firmware in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
+ return err;
+}
+
+void smu_v14_0_fini_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ amdgpu_ucode_release(&adev->pm.fw);
+ adev->pm.fw_version = 0;
+}
+
+int smu_v14_0_load_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const uint32_t *src;
+ const struct smc_firmware_header_v1_0 *hdr;
+ uint32_t addr_start = MP1_SRAM;
+ uint32_t i;
+ uint32_t smc_fw_size;
+ uint32_t mp1_fw_flags;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ src = (const uint32_t *)(adev->pm.fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	smc_fw_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+
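+	/* copy the image into MP1 SRAM one dword at a time; note the loop bounds skip the first and last dwords of the payload */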
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
+ WREG32_PCIE(addr_start, src[i]);
+ addr_start += 4;
+ }
+
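+	/* assert and then release MP1 LX3 reset so the MCU restarts from the freshly loaded image */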
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+
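+	/* poll the firmware flags until the INTERRUPTS_ENABLED bit reports the MCU is up, or time out */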
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_firmware_info *ucode = NULL;
+ uint32_t size = 0, pptable_id = 0;
+ int ret = 0;
+ void *table;
+
+	/* no need to load the SMU firmware in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ if (!adev->scpm_enabled)
+ return 0;
+
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
+ return 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* "pptable_id == 0" means vbios carries the pptable. */
+ if (!pptable_id)
+ return 0;
+
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+ if (ret)
+ return ret;
+
+ smu->pptable_firmware.data = table;
+ smu->pptable_firmware.size = size;
+
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
+ ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
+ ucode->fw = &smu->pptable_firmware;
+ adev->firmware.fw_size +=
+ ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
+
+ return 0;
+}
+
+int smu_v14_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+int smu_v14_0_check_fw_version(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+ if (smu->is_apu)
+ adev->pm.fw_version = smu_version;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+ case IP_VERSION(14, 0, 1):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
+ break;
+ default:
+ dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
+ break;
+ }
+
+ if (adev->pm.fw)
+ dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+
+ /*
+	 * 1. An if_version mismatch is not critical, as the fw is designed
+	 * to be backward compatible.
+	 * 2. New fw usually brings some optimizations, but those are visible
+	 * only with the matching driver.
+	 * Considering the above, we just leave the user a warning message
+	 * instead of halting driver load.
+ */
+ if (if_version != smu->smc_driver_if_version) {
+ dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_driver_if_version, if_version,
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+ dev_info(adev->dev, "SMU driver if version not matched\n");
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t ppt_offset_bytes;
+ const struct smc_firmware_header_v2_0 *v2;
+
+ v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
+
+ ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
+ *size = le32_to_cpu(v2->ppt_size_bytes);
+ *table = (uint8_t *)v2 + ppt_offset_bytes;
+
+ return 0;
+}
+
+static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
+ uint32_t *size, uint32_t pptable_id)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const struct smc_firmware_header_v2_1 *v2_1;
+ struct smc_soft_pptable_entry *entries;
+ uint32_t pptable_count = 0;
+ int i = 0;
+
+ v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
+ entries = (struct smc_soft_pptable_entry *)
+ ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
+ pptable_count = le32_to_cpu(v2_1->pptable_count);
+ for (i = 0; i < pptable_count; i++) {
+ if (le32_to_cpu(entries[i].id) == pptable_id) {
+ *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
+ *size = le32_to_cpu(entries[i].ppt_size_bytes);
+ break;
+ }
+ }
+
+ if (i == pptable_count)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t atom_table_size;
+ uint8_t frev, crev;
+ int ret, index;
+
+ dev_info(adev->dev, "use vbios provided pptable\n");
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+ (uint8_t **)table);
+ if (ret)
+ return ret;
+
+ if (size)
+ *size = atom_table_size;
+
+ return 0;
+}
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t version_major, version_minor;
+ int ret;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ if (!hdr)
+ return -EINVAL;
+
+ dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
+
+ version_major = le16_to_cpu(hdr->header.header_version_major);
+ version_minor = le16_to_cpu(hdr->header.header_version_minor);
+ if (version_major != 2) {
+ dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
+ version_major, version_minor);
+ return -EINVAL;
+ }
+
+ switch (version_minor) {
+ case 0:
+ ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
+ break;
+ case 1:
+ ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_setup_pptable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t size = 0, pptable_id = 0;
+ void *table;
+ int ret = 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+	/* use the vbios pptable in SR-IOV mode, or when no pptable id is set (unless running in emulation) */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
+ else
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ int ret = 0;
+
+ smu_table->driver_pptable =
+ kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ ret = -ENOMEM;
+ goto err0_out;
+ }
+
+ smu_table->max_sustainable_clocks =
+ kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
+ if (!smu_table->max_sustainable_clocks) {
+ ret = -ENOMEM;
+ goto err1_out;
+ }
+
+ if (tables[SMU_TABLE_OVERDRIVE].size) {
+ smu_table->overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->overdrive_table) {
+ ret = -ENOMEM;
+ goto err2_out;
+ }
+
+ smu_table->boot_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->boot_overdrive_table) {
+ ret = -ENOMEM;
+ goto err3_out;
+ }
+ }
+
+ smu_table->combo_pptable =
+ kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->combo_pptable) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
+
+ return 0;
+
+err4_out:
+ kfree(smu_table->boot_overdrive_table);
+err3_out:
+ kfree(smu_table->overdrive_table);
+err2_out:
+ kfree(smu_table->max_sustainable_clocks);
+err1_out:
+ kfree(smu_table->driver_pptable);
+err0_out:
+ return ret;
+}
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ kfree(smu_table->gpu_metrics_table);
+ kfree(smu_table->combo_pptable);
+ kfree(smu_table->boot_overdrive_table);
+ kfree(smu_table->overdrive_table);
+ kfree(smu_table->max_sustainable_clocks);
+ kfree(smu_table->driver_pptable);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->combo_pptable = NULL;
+ smu_table->boot_overdrive_table = NULL;
+ smu_table->overdrive_table = NULL;
+ smu_table->max_sustainable_clocks = NULL;
+ smu_table->driver_pptable = NULL;
+ kfree(smu_table->hardcode_pptable);
+ smu_table->hardcode_pptable = NULL;
+
+ kfree(smu_table->ecc_table);
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
+ smu_table->ecc_table = NULL;
+ smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
+ smu_table->metrics_time = 0;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+int smu_v14_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
+
+ return 0;
+}
+
+int smu_v14_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_4 *v_3_4;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_smu_info_v3_6 *smu_info_v3_6;
+ struct atom_smu_info_v4_0 *smu_info_v4_0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu14!\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ break;
+ case 4:
+ default:
+ v_3_4 = (struct atom_firmware_info_v3_4 *)header;
+ smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
+ break;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header)) {
+ if ((frev == 3) && (crev == 6)) {
+ smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
+ } else if ((frev == 4) && (crev == 0)) {
+ smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
+ smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
+ } else {
+ dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
+ (uint32_t)frev, (uint32_t)crev);
+ }
+ }
+
+ return 0;
+}
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
+
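+	/* split the 64-bit allowed-feature bitmap into two 32-bit words; the SMU takes the high word first */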
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1], NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
+}
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
+}
+
+int smu_v14_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit)
+{
+ int power_src;
+ int ret = 0;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+ return -EINVAL;
+
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetPptLimit,
+ power_src << 16,
+ power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
+
+ return ret;
+}
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+
+ return 0;
+}
+
+static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+				   unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
+static int smu_v14_0_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == SOC15_IH_CLIENTID_THM) {
+ switch (src_id) {
+ case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ break;
+ case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+ break;
+ default:
+			dev_emerg(adev->dev, "ERROR: unknown THM interrupt src id (%d)\n",
+ src_id);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = {
+ .set = smu_v14_0_set_irq_state,
+ .process = smu_v14_0_irq_process,
+};
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v14_0_irq_funcs;
+
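+	/* register both thermal threshold crossings (L2H and H2L) plus the MP1-to-driver software interrupt */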
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_L2H,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_H2L,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
+ irq_src);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu,
+ uint64_t event_arg)
+{
+ int ret = 0;
+
+ dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg)
+{
+ int ret = -EINVAL;
+
+ switch (event) {
+ case SMU_EVENT_RESET_COMPLETE:
+ ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+ uint32_t clock_limit;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* clocks reported in MHz (boot values are stored in 10 kHz units) */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
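+	/* message parameter layout: clock id in the upper 16 bits, lower 16 bits unused for the min/max queries */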
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxDpmFreq,
+ param,
+ max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ param,
+ max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ return ret;
+}
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
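+	/* parameter layout: clock id in the high 16 bits, target frequency (MHz) in the low 16 bits */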
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+	if (!min && !max)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_14_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_14_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_14_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_14_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_14_0_dpm_table *vclk_table =
+ &dpm_context->dpm_tables.vclk_table;
+ struct smu_14_0_dpm_table *dclk_table =
+ &dpm_context->dpm_tables.dclk_table;
+ struct smu_14_0_dpm_table *fclk_table =
+ &dpm_context->dpm_tables.fclk_table;
+ struct smu_umd_pstate_table *pstate_table =
+ &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t mclk_min = 0, mclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
+ uint32_t fclk_min = 0, fclk_max = 0;
+ int ret = 0, i;
+
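+	/*
+	 * HIGH/LOW pin every clock to the top/bottom of its dpm table,
+	 * AUTO restores the full range, and the PROFILE_* levels pin
+	 * clocks to the values recorded in the pstate table.
+	 */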
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ sclk_min = sclk_max = gfx_table->max;
+ mclk_min = mclk_max = mem_table->max;
+ socclk_min = socclk_max = soc_table->max;
+ vclk_min = vclk_max = vclk_table->max;
+ dclk_min = dclk_max = dclk_table->max;
+ fclk_min = fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ sclk_min = sclk_max = gfx_table->min;
+ mclk_min = mclk_max = mem_table->min;
+ socclk_min = socclk_max = soc_table->min;
+ vclk_min = vclk_max = vclk_table->min;
+ dclk_min = dclk_max = dclk_table->min;
+ fclk_min = fclk_max = fclk_table->min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ sclk_min = gfx_table->min;
+ sclk_max = gfx_table->max;
+ mclk_min = mem_table->min;
+ mclk_max = mem_table->max;
+ socclk_min = soc_table->min;
+ socclk_max = soc_table->max;
+ vclk_min = vclk_table->min;
+ vclk_max = vclk_table->max;
+ dclk_min = dclk_table->min;
+ dclk_max = dclk_table->max;
+ fclk_min = fclk_table->min;
+ fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
+ default:
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = sclk_min;
+ pstate_table->gfxclk_pstate.curr.max = sclk_max;
+ }
+
+ if (mclk_min && mclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_MCLK,
+ mclk_min,
+ mclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->uclk_pstate.curr.min = mclk_min;
+ pstate_table->uclk_pstate.curr.max = mclk_max;
+ }
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->socclk_pstate.curr.min = socclk_min;
+ pstate_table->socclk_pstate.curr.max = socclk_max;
+ }
+
+ if (vclk_min && vclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_VCLK1 : SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->vclk_pstate.curr.min = vclk_min;
+ pstate_table->vclk_pstate.curr.max = vclk_max;
+ }
+
+ if (dclk_min && dclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_DCLK1 : SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->dclk_pstate.curr.min = dclk_min;
+ pstate_table->dclk_pstate.curr.max = dclk_max;
+ }
+
+ if (fclk_min && fclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_FCLK,
+ fclk_min,
+ fclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->fclk_pstate.curr.min = fclk_min;
+ pstate_table->fclk_pstate.curr.max = fclk_max;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
+static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint16_t level,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
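+	/* message argument layout: clk_id in bits [31:16], DPM level index in bits [15:0] */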
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ value);
+ if (ret)
+ return ret;
+
+ *value = *value & 0x7fffffff;
+
+ return ret;
+}
+
+static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ int ret;
+
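+	/* a level index of 0xff asks the SMU for the number of DPM levels rather than a frequency */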
+ ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+
+ return ret;
+}
+
+static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ bool *is_fine_grained_dpm)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+ uint32_t value;
+
+ if (!is_fine_grained_dpm)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ &value);
+ if (ret)
+ return ret;
+
+	/*
+	 * BIT31: 1 - fine-grained DPM, 0 - discrete DPM
+	 * Fine-grained DPM is currently unsupported.
+	 */
+ *is_fine_grained_dpm = value & 0x80000000;
+
+ return 0;
+}
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table)
+{
+ int ret = 0;
+ uint32_t clk;
+ int i;
+
+ ret = smu_v14_0_get_dpm_level_count(smu,
+ clk_type,
+ &single_dpm_table->count);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ ret = smu_v14_0_get_fine_grained_status(smu,
+ clk_type,
+ &single_dpm_table->is_fine_grained);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++) {
+ ret = smu_v14_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ i,
+ &clk);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+
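+		/* assumes the SMU reports levels in ascending order: first level is min, last is max */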
+ if (i == 0)
+ single_dpm_table->min = clk;
+ else if (i == single_dpm_table->count - 1)
+ single_dpm_table->max = clk;
+ }
+
+ return 0;
+}
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ i << 16U, NULL);
+ else if (i == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
+ i << 16U, NULL);
+ else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_run_btc(struct smu_context *smu)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ if (res)
+ dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+
+ return res;
+}
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_AllowGpo,
+ enablement ? 1 : 0,
+ NULL);
+ if (res)
+ dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+ return res;
+}
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
+
+ return ret;
+}
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (baco_seq == BACO_SEQ_BAMACO ||
+ baco_seq == BACO_SEQ_BACO)
+ smu_baco->state = SMU_BACO_STATE_ENTER;
+ else
+ smu_baco->state = SMU_BACO_STATE_EXIT;
+
+ return 0;
+}
+
+int smu_v14_0_get_bamaco_support(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
+
+ if (amdgpu_sriov_vf(smu->adev) ||
+ !smu_baco->platform_support)
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return (bamaco_support |= BACO_SUPPORT);
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return 0;
+
+ return (bamaco_support |= BACO_SUPPORT);
+}
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+
+ return smu_baco->state;
+}
+
+int smu_v14_0_baco_set_state(struct smu_context *smu,
+ enum smu_baco_state state)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_v14_0_baco_get_state(smu) == state)
+ return 0;
+
+ if (state == SMU_BACO_STATE_ENTER) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnterBaco,
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ExitBaco,
+ NULL);
+ if (ret)
+ return ret;
+
+		/* clear VBIOS scratch registers 6 and 7 for the upcoming ASIC reinit */
+ WREG32(adev->bios_scratch_reg_offset + 6, 0);
+ WREG32(adev->bios_scratch_reg_offset + 7, 0);
+ }
+
+ if (!ret)
+ smu_baco->state = state;
+
+ return ret;
+}
+
+int smu_v14_0_baco_enter(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_ENTER);
+ if (ret)
+ return ret;
+
+ msleep(10);
+
+ return ret;
+}
+
+int smu_v14_0_baco_exit(struct smu_context *smu)
+{
+ return smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_EXIT);
+}
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ uint16_t index;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
+ }
+
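+	/* without PSP fw loading, fire the message and do not wait for a response */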
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+ return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
+}
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
+ smu_table->clocks_table, false);
+}
+
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ int ret = 0;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
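+		/* input[0]: 0 = set min, 1 = set max; input[1]: frequency in MHz */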
+ if (size != 2) {
+			dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+					 "Fine grain setting minimum sclk (%ld MHz) is below the minimum allowed (%d MHz)\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+					 "Fine grain setting maximum sclk (%ld MHz) is above the maximum allowed (%d MHz)\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+			dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+			dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+				"The requested minimum sclk (%d MHz) is greater than the requested maximum sclk (%d MHz)\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
diff --git a/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage b/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage
new file mode 100644
index 000000000000..33543d386784
--- /dev/null
+++ b/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage
@@ -0,0 +1,1847 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+
+#define SWSMU_CODE_LAYER_L3
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v14_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+
+#include "asic_reg/mp/mp_14_0_2_offset.h"
+#include "asic_reg/mp/mp_14_0_2_sh_mask.h"
+
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+
+#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
+
+int smu_v14_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ char fw_name[30];
+ char ucode_prefix[30];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+	/* no need to load SMU firmware in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
+ return err;
+}
+
+void smu_v14_0_fini_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ amdgpu_ucode_release(&adev->pm.fw);
+ adev->pm.fw_version = 0;
+}
+
+int smu_v14_0_load_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const uint32_t *src;
+ const struct smc_firmware_header_v1_0 *hdr;
+ uint32_t addr_start = MP1_SRAM;
+ uint32_t i;
+ uint32_t smc_fw_size;
+ uint32_t mp1_fw_flags;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ src = (const uint32_t *)(adev->pm.fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
+
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
+ WREG32_PCIE(addr_start, src[i]);
+ addr_start += 4;
+ }
+
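+	/* pulse the LX3 reset bit (assert, then deassert) to restart the MP1 core on the new image */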
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_firmware_info *ucode = NULL;
+ uint32_t size = 0, pptable_id = 0;
+ int ret = 0;
+ void *table;
+
+	/* no need to load SMU firmware in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ if (!adev->scpm_enabled)
+ return 0;
+
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
+ return 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* "pptable_id == 0" means vbios carries the pptable. */
+ if (!pptable_id)
+ return 0;
+
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+ if (ret)
+ return ret;
+
+ smu->pptable_firmware.data = table;
+ smu->pptable_firmware.size = size;
+
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
+ ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
+ ucode->fw = &smu->pptable_firmware;
+ adev->firmware.fw_size +=
+ ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
+
+ return 0;
+}
+
+int smu_v14_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+int smu_v14_0_check_fw_version(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
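+	/* smu_version layout: [31:24] program, [23:16] major, [15:8] minor, [7:0] debug */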
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+ if (smu->is_apu)
+ adev->pm.fw_version = smu_version;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+ case IP_VERSION(14, 0, 1):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+<<<<<<<
+=======
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
+>>>>>>>
+ break;
+ default:
+		dev_err(adev->dev, "unsupported smu IP version: 0x%x\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
+ break;
+ }
+
+ if (adev->pm.fw)
+ dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+
+	/*
+	 * 1. An if_version mismatch is not critical, as our fw is designed
+	 * to be backward compatible.
+	 * 2. New fw usually brings some optimizations, but those are
+	 * visible only with the paired driver.
+	 * Given the above, we just leave the user a verbal message instead
+	 * of halting driver loading.
+	 */
+ if (if_version != smu->smc_driver_if_version) {
+ dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_driver_if_version, if_version,
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+ dev_info(adev->dev, "SMU driver if version not matched\n");
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t ppt_offset_bytes;
+ const struct smc_firmware_header_v2_0 *v2;
+
+ v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
+
+ ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
+ *size = le32_to_cpu(v2->ppt_size_bytes);
+ *table = (uint8_t *)v2 + ppt_offset_bytes;
+
+ return 0;
+}
+
+static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
+ uint32_t *size, uint32_t pptable_id)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const struct smc_firmware_header_v2_1 *v2_1;
+ struct smc_soft_pptable_entry *entries;
+ uint32_t pptable_count = 0;
+ int i = 0;
+
+ v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
+ entries = (struct smc_soft_pptable_entry *)
+ ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
+ pptable_count = le32_to_cpu(v2_1->pptable_count);
+ for (i = 0; i < pptable_count; i++) {
+ if (le32_to_cpu(entries[i].id) == pptable_id) {
+ *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
+ *size = le32_to_cpu(entries[i].ppt_size_bytes);
+ break;
+ }
+ }
+
+ if (i == pptable_count)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t atom_table_size;
+ uint8_t frev, crev;
+ int ret, index;
+
+ dev_info(adev->dev, "use vbios provided pptable\n");
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+ (uint8_t **)table);
+ if (ret)
+ return ret;
+
+ if (size)
+ *size = atom_table_size;
+
+ return 0;
+}
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t version_major, version_minor;
+ int ret;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ if (!hdr)
+ return -EINVAL;
+
+ dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
+
+ version_major = le16_to_cpu(hdr->header.header_version_major);
+ version_minor = le16_to_cpu(hdr->header.header_version_minor);
+ if (version_major != 2) {
+ dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
+ version_major, version_minor);
+ return -EINVAL;
+ }
+
+ switch (version_minor) {
+ case 0:
+ ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
+ break;
+ case 1:
+ ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_setup_pptable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t size = 0, pptable_id = 0;
+ void *table;
+ int ret = 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* force using vbios pptable in sriov mode */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
+ else
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ int ret = 0;
+
+ smu_table->driver_pptable =
+ kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ ret = -ENOMEM;
+ goto err0_out;
+ }
+
+ smu_table->max_sustainable_clocks =
+ kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
+ if (!smu_table->max_sustainable_clocks) {
+ ret = -ENOMEM;
+ goto err1_out;
+ }
+
+ if (tables[SMU_TABLE_OVERDRIVE].size) {
+ smu_table->overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->overdrive_table) {
+ ret = -ENOMEM;
+ goto err2_out;
+ }
+
+ smu_table->boot_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->boot_overdrive_table) {
+ ret = -ENOMEM;
+ goto err3_out;
+ }
+ }
+
+ smu_table->combo_pptable =
+ kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->combo_pptable) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
+
+ return 0;
+
+err4_out:
+ kfree(smu_table->boot_overdrive_table);
+err3_out:
+ kfree(smu_table->overdrive_table);
+err2_out:
+ kfree(smu_table->max_sustainable_clocks);
+err1_out:
+ kfree(smu_table->driver_pptable);
+err0_out:
+ return ret;
+}
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ kfree(smu_table->gpu_metrics_table);
+ kfree(smu_table->combo_pptable);
+ kfree(smu_table->boot_overdrive_table);
+ kfree(smu_table->overdrive_table);
+ kfree(smu_table->max_sustainable_clocks);
+ kfree(smu_table->driver_pptable);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->combo_pptable = NULL;
+ smu_table->boot_overdrive_table = NULL;
+ smu_table->overdrive_table = NULL;
+ smu_table->max_sustainable_clocks = NULL;
+ smu_table->driver_pptable = NULL;
+ kfree(smu_table->hardcode_pptable);
+ smu_table->hardcode_pptable = NULL;
+
+ kfree(smu_table->ecc_table);
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
+ smu_table->ecc_table = NULL;
+ smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
+ smu_table->metrics_time = 0;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+int smu_v14_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
+
+ return 0;
+}
+
+int smu_v14_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_4 *v_3_4;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_smu_info_v3_6 *smu_info_v3_6;
+ struct atom_smu_info_v4_0 *smu_info_v4_0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu v14!\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ break;
+ case 4:
+ default:
+ v_3_4 = (struct atom_firmware_info_v3_4 *)header;
+ smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
+ break;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header)) {
+
+ if ((frev == 3) && (crev == 6)) {
+ smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
+ } else if ((frev == 4) && (crev == 0)) {
+ smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
+ smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
+ } else {
+ dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
+ (uint32_t)frev, (uint32_t)crev);
+ }
+ }
+
+ return 0;
+}
+
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
+
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
+
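+	/* the 64-bit allowed-feature bitmap is sent as two 32-bit halves, high word first */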
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1], NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
+}
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
+}
+
+int smu_v14_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit)
+{
+ int power_src;
+ int ret = 0;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+ return -EINVAL;
+
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
+
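+	/* the power source index is carried in the upper 16 bits of the message argument */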
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetPptLimit,
+ power_src << 16,
+ power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
+
+ return ret;
+}
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+
+ return 0;
+}
+
+static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+				   unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
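+		/* program SW interrupt ID 0xFE, clear VALID, then unmask the interrupt */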
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
+static int smu_v14_0_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == SOC15_IH_CLIENTID_THM) {
+ switch (src_id) {
+ case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ break;
+ case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+ break;
+ default:
+			dev_emerg(adev->dev, "ERROR: unknown GPU temperature interrupt src id (%d)\n",
+				  src_id);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = {
+ .set = smu_v14_0_set_irq_state,
+ .process = smu_v14_0_irq_process,
+};
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v14_0_irq_funcs;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_L2H,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_H2L,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
+ irq_src);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu,
+ uint64_t event_arg)
+{
+ int ret = 0;
+
+ dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg)
+{
+ int ret = -EINVAL;
+
+ switch (event) {
+ case SMU_EVENT_RESET_COMPLETE:
+ ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+ uint32_t clock_limit;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* boot values are in 10 kHz units; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxDpmFreq,
+ param,
+ max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ param,
+ max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ return ret;
+}
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_14_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_14_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_14_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_14_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_14_0_dpm_table *vclk_table =
+ &dpm_context->dpm_tables.vclk_table;
+ struct smu_14_0_dpm_table *dclk_table =
+ &dpm_context->dpm_tables.dclk_table;
+ struct smu_14_0_dpm_table *fclk_table =
+ &dpm_context->dpm_tables.fclk_table;
+ struct smu_umd_pstate_table *pstate_table =
+ &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t mclk_min = 0, mclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
+ uint32_t fclk_min = 0, fclk_max = 0;
+ int ret = 0, i;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ sclk_min = sclk_max = gfx_table->max;
+ mclk_min = mclk_max = mem_table->max;
+ socclk_min = socclk_max = soc_table->max;
+ vclk_min = vclk_max = vclk_table->max;
+ dclk_min = dclk_max = dclk_table->max;
+ fclk_min = fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ sclk_min = sclk_max = gfx_table->min;
+ mclk_min = mclk_max = mem_table->min;
+ socclk_min = socclk_max = soc_table->min;
+ vclk_min = vclk_max = vclk_table->min;
+ dclk_min = dclk_max = dclk_table->min;
+ fclk_min = fclk_max = fclk_table->min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ sclk_min = gfx_table->min;
+ sclk_max = gfx_table->max;
+ mclk_min = mem_table->min;
+ mclk_max = mem_table->max;
+ socclk_min = soc_table->min;
+ socclk_max = soc_table->max;
+ vclk_min = vclk_table->min;
+ vclk_max = vclk_table->max;
+ dclk_min = dclk_table->min;
+ dclk_max = dclk_table->max;
+ fclk_min = fclk_table->min;
+ fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
+ default:
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = sclk_min;
+ pstate_table->gfxclk_pstate.curr.max = sclk_max;
+ }
+
+ if (mclk_min && mclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_MCLK,
+ mclk_min,
+ mclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->uclk_pstate.curr.min = mclk_min;
+ pstate_table->uclk_pstate.curr.max = mclk_max;
+ }
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->socclk_pstate.curr.min = socclk_min;
+ pstate_table->socclk_pstate.curr.max = socclk_max;
+ }
+
+ if (vclk_min && vclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_VCLK1 : SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->vclk_pstate.curr.min = vclk_min;
+ pstate_table->vclk_pstate.curr.max = vclk_max;
+ }
+
+ if (dclk_min && dclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_DCLK1 : SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->dclk_pstate.curr.min = dclk_min;
+ pstate_table->dclk_pstate.curr.max = dclk_max;
+ }
+
+ if (fclk_min && fclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_FCLK,
+ fclk_min,
+ fclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->fclk_pstate.curr.min = fclk_min;
+ pstate_table->fclk_pstate.curr.max = fclk_max;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
+static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint16_t level,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ value);
+ if (ret)
+ return ret;
+
+ *value = *value & 0x7fffffff;
+
+ return ret;
+}
+
+static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ int ret;
+
+ ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+
+ return ret;
+}
+
+static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ bool *is_fine_grained_dpm)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+ uint32_t value;
+
+ if (!is_fine_grained_dpm)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ &value);
+ if (ret)
+ return ret;
+
+	/*
+	 * BIT31: 1 - fine-grained DPM, 0 - discrete DPM
+	 * Fine-grained DPM is currently unsupported.
+	 */
+ *is_fine_grained_dpm = value & 0x80000000;
+
+ return 0;
+}
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table)
+{
+ int ret = 0;
+ uint32_t clk;
+ int i;
+
+ ret = smu_v14_0_get_dpm_level_count(smu,
+ clk_type,
+ &single_dpm_table->count);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ ret = smu_v14_0_get_fine_grained_status(smu,
+ clk_type,
+ &single_dpm_table->is_fine_grained);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++) {
+ ret = smu_v14_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ i,
+ &clk);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+
+ if (i == 0)
+ single_dpm_table->min = clk;
+ else if (i == single_dpm_table->count - 1)
+ single_dpm_table->max = clk;
+ }
+
+ return 0;
+}
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ i << 16U, NULL);
+ else if (i == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
+ i << 16U, NULL);
+ else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_run_btc(struct smu_context *smu)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ if (res)
+ dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+
+ return res;
+}
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_AllowGpo,
+ enablement ? 1 : 0,
+ NULL);
+ if (res)
+ dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+ return res;
+}
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
+
+ return ret;
+}
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (baco_seq == BACO_SEQ_BAMACO ||
+ baco_seq == BACO_SEQ_BACO)
+ smu_baco->state = SMU_BACO_STATE_ENTER;
+ else
+ smu_baco->state = SMU_BACO_STATE_EXIT;
+
+ return 0;
+}
+
+int smu_v14_0_get_bamaco_support(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
+
+ if (amdgpu_sriov_vf(smu->adev) ||
+ !smu_baco->platform_support)
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return (bamaco_support |= BACO_SUPPORT);
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return 0;
+
+ return (bamaco_support |= BACO_SUPPORT);
+}
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+
+ return smu_baco->state;
+}
+
+int smu_v14_0_baco_set_state(struct smu_context *smu,
+ enum smu_baco_state state)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_v14_0_baco_get_state(smu) == state)
+ return 0;
+
+ if (state == SMU_BACO_STATE_ENTER) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnterBaco,
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ExitBaco,
+ NULL);
+ if (ret)
+ return ret;
+
+ /* clear vbios scratch 6 and 7 for coming asic reinit */
+ WREG32(adev->bios_scratch_reg_offset + 6, 0);
+ WREG32(adev->bios_scratch_reg_offset + 7, 0);
+ }
+
+ if (!ret)
+ smu_baco->state = state;
+
+ return ret;
+}
+
+int smu_v14_0_baco_enter(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_ENTER);
+ if (ret)
+ return ret;
+
+ msleep(10);
+
+ return ret;
+}
+
+int smu_v14_0_baco_exit(struct smu_context *smu)
+{
+ return smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_EXIT);
+}
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ uint16_t index;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
+ }
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+ return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
+}
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
+ smu_table->clocks_table, false);
+}
+
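+/*
+ * Overdrive editing: input[0] selects the field (0 = hard min sclk,
+ * 1 = soft max sclk) and input[1] the frequency in MHz; the new
+ * limits only reach the SMU on PP_OD_COMMIT_DPM_TABLE.
+ */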
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ int ret = 0;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
diff --git a/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage.1 b/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage.1
new file mode 100644
index 000000000000..33543d386784
--- /dev/null
+++ b/rr-cache/51b3902d6bcab4674ff4e4b9376624bb5f6a303d/preimage.1
@@ -0,0 +1,1847 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+
+#define SWSMU_CODE_LAYER_L3
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v14_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+
+#include "asic_reg/mp/mp_14_0_2_offset.h"
+#include "asic_reg/mp/mp_14_0_2_sh_mask.h"
+
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
+#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
+#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+
+#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
+
+int smu_v14_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ char fw_name[30];
+ char ucode_prefix[30];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+ /* no need to load SMU firmware in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
+ return err;
+}
+
+void smu_v14_0_fini_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ amdgpu_ucode_release(&adev->pm.fw);
+ adev->pm.fw_version = 0;
+}
+
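+/*
+ * Direct (non-PSP) firmware load: copy the SMC image into MP1 SRAM
+ * a dword at a time, pulse the LX3 reset bit to start the firmware,
+ * then poll the firmware flags until interrupts are reported enabled.
+ */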
+int smu_v14_0_load_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const uint32_t *src;
+ const struct smc_firmware_header_v1_0 *hdr;
+ uint32_t addr_start = MP1_SRAM;
+ uint32_t i;
+ uint32_t smc_fw_size;
+ uint32_t mp1_fw_flags;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ src = (const uint32_t *)(adev->pm.fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
+
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
+ WREG32_PCIE(addr_start, src[i]);
+ addr_start += 4;
+ }
+
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_firmware_info *ucode = NULL;
+ uint32_t size = 0, pptable_id = 0;
+ int ret = 0;
+ void *table;
+
+ /* no need to load SMU firmware in SR-IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ if (!adev->scpm_enabled)
+ return 0;
+
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
+ return 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* "pptable_id == 0" means vbios carries the pptable. */
+ if (!pptable_id)
+ return 0;
+
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+ if (ret)
+ return ret;
+
+ smu->pptable_firmware.data = table;
+ smu->pptable_firmware.size = size;
+
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
+ ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
+ ucode->fw = &smu->pptable_firmware;
+ adev->firmware.fw_size +=
+ ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
+
+ return 0;
+}
+
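+/*
+ * MP1 firmware reports readiness via the INTERRUPTS_ENABLED bit in
+ * its FIRMWARE_FLAGS register (the register offset differs on
+ * SMU v14.0.0).
+ */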
+int smu_v14_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
+ else
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+int smu_v14_0_check_fw_version(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+ if (smu->is_apu)
+ adev->pm.fw_version = smu_version;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+ case IP_VERSION(14, 0, 1):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
+ break;
+ default:
+ dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
+ break;
+ }
+
+ if (adev->pm.fw)
+ dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+
+ /*
+ * 1. An if_version mismatch is not critical, as the firmware is
+ * designed to be backward compatible.
+ * 2. Newer firmware usually brings some optimizations, but those
+ * are visible only with the paired driver.
+ * Given the above, just leave the user a message instead of
+ * halting driver load.
+ */
+ if (if_version != smu->smc_driver_if_version) {
+ dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_driver_if_version, if_version,
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+ dev_info(adev->dev, "SMU driver if version not matched\n");
+ }
+
+ return ret;
+}
+
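+/*
+ * A v2.0 SMC firmware header embeds a single pptable at a fixed
+ * offset; v2.1 carries a table of entries looked up by pptable id.
+ */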
+static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t ppt_offset_bytes;
+ const struct smc_firmware_header_v2_0 *v2;
+
+ v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
+
+ ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
+ *size = le32_to_cpu(v2->ppt_size_bytes);
+ *table = (uint8_t *)v2 + ppt_offset_bytes;
+
+ return 0;
+}
+
+static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
+ uint32_t *size, uint32_t pptable_id)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const struct smc_firmware_header_v2_1 *v2_1;
+ struct smc_soft_pptable_entry *entries;
+ uint32_t pptable_count = 0;
+ int i = 0;
+
+ v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
+ entries = (struct smc_soft_pptable_entry *)
+ ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
+ pptable_count = le32_to_cpu(v2_1->pptable_count);
+ for (i = 0; i < pptable_count; i++) {
+ if (le32_to_cpu(entries[i].id) == pptable_id) {
+ *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
+ *size = le32_to_cpu(entries[i].ppt_size_bytes);
+ break;
+ }
+ }
+
+ if (i == pptable_count)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t atom_table_size;
+ uint8_t frev, crev;
+ int ret, index;
+
+ dev_info(adev->dev, "use vbios provided pptable\n");
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+ (uint8_t **)table);
+ if (ret)
+ return ret;
+
+ if (size)
+ *size = atom_table_size;
+
+ return 0;
+}
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t version_major, version_minor;
+ int ret;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ if (!hdr)
+ return -EINVAL;
+
+ dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
+
+ version_major = le16_to_cpu(hdr->header.header_version_major);
+ version_minor = le16_to_cpu(hdr->header.header_version_minor);
+ if (version_major != 2) {
+ dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
+ version_major, version_minor);
+ return -EINVAL;
+ }
+
+ switch (version_minor) {
+ case 0:
+ ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
+ break;
+ case 1:
+ ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_setup_pptable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t size = 0, pptable_id = 0;
+ void *table;
+ int ret = 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* force using vbios pptable in sriov mode */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
+ else
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ int ret = 0;
+
+ smu_table->driver_pptable =
+ kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ ret = -ENOMEM;
+ goto err0_out;
+ }
+
+ smu_table->max_sustainable_clocks =
+ kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
+ if (!smu_table->max_sustainable_clocks) {
+ ret = -ENOMEM;
+ goto err1_out;
+ }
+
+ if (tables[SMU_TABLE_OVERDRIVE].size) {
+ smu_table->overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->overdrive_table) {
+ ret = -ENOMEM;
+ goto err2_out;
+ }
+
+ smu_table->boot_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->boot_overdrive_table) {
+ ret = -ENOMEM;
+ goto err3_out;
+ }
+ }
+
+ smu_table->combo_pptable =
+ kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->combo_pptable) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
+
+ return 0;
+
+err4_out:
+ kfree(smu_table->boot_overdrive_table);
+err3_out:
+ kfree(smu_table->overdrive_table);
+err2_out:
+ kfree(smu_table->max_sustainable_clocks);
+err1_out:
+ kfree(smu_table->driver_pptable);
+err0_out:
+ return ret;
+}
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ kfree(smu_table->gpu_metrics_table);
+ kfree(smu_table->combo_pptable);
+ kfree(smu_table->boot_overdrive_table);
+ kfree(smu_table->overdrive_table);
+ kfree(smu_table->max_sustainable_clocks);
+ kfree(smu_table->driver_pptable);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->combo_pptable = NULL;
+ smu_table->boot_overdrive_table = NULL;
+ smu_table->overdrive_table = NULL;
+ smu_table->max_sustainable_clocks = NULL;
+ smu_table->driver_pptable = NULL;
+ kfree(smu_table->hardcode_pptable);
+ smu_table->hardcode_pptable = NULL;
+
+ kfree(smu_table->ecc_table);
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
+ smu_table->ecc_table = NULL;
+ smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
+ smu_table->metrics_time = 0;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+int smu_v14_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
+
+ return 0;
+}
+
+int smu_v14_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_4 *v_3_4;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_smu_info_v3_6 *smu_info_v3_6;
+ struct atom_smu_info_v4_0 *smu_info_v4_0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu14!\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ break;
+ case 4:
+ default:
+ v_3_4 = (struct atom_firmware_info_v3_4 *)header;
+ smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
+ break;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header)) {
+
+ if ((frev == 3) && (crev == 6)) {
+ smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
+ } else if ((frev == 4) && (crev == 0)) {
+ smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
+ smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
+ } else {
+ dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
+ (uint32_t)frev, (uint32_t)crev);
+ }
+ }
+
+ return 0;
+}
+
+
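+/*
+ * Hand the SMU the DRAM log pool: the 64-bit MC address is split
+ * into high/low 32-bit halves, followed by the pool size.
+ */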
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
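+/*
+ * The 64-bit allowed-feature bitmap is sent as two 32-bit words,
+ * high word first.
+ */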
+int smu_v14_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
+
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1], NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
+}
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
+}
+
+int smu_v14_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit)
+{
+ int power_src;
+ int ret = 0;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+ return -EINVAL;
+
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetPptLimit,
+ power_src << 16,
+ power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
+
+ return ret;
+}
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+
+ return 0;
+}
+
+static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
+ } else {
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
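+/*
+ * L2H fires when the ASIC temperature rises above DIG_THERM_INTH:
+ * schedule the delayed software-CTF handler. H2L fires when it
+ * drops below DIG_THERM_INTL and is only logged.
+ */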
+static int smu_v14_0_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == SOC15_IH_CLIENTID_THM) {
+ switch (src_id) {
+ case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ break;
+ case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+ break;
+ default:
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
+ src_id);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = {
+ .set = smu_v14_0_set_irq_state,
+ .process = smu_v14_0_irq_process,
+};
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v14_0_irq_funcs;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_L2H,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
+ THM_11_0__SRCID__THM_DIG_THERM_H2L,
+ irq_src);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
+ irq_src);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu,
+ uint64_t event_arg)
+{
+ int ret = 0;
+
+ dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg)
+{
+ int ret = -EINVAL;
+
+ switch (event) {
+ case SMU_EVENT_RESET_COMPLETE:
+ ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
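+/*
+ * With DPM disabled for the clock, fall back to the vbios bootup
+ * value (stored in 10 kHz, reported in MHz); otherwise query the
+ * SMU with the clock id packed into the upper 16 bits of the
+ * message parameter.
+ */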
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+ uint32_t clock_limit;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* boot clock values are in 10 kHz; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxDpmFreq,
+ param,
+ max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ param,
+ max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ return ret;
+}
+
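+/*
+ * Soft min/max message parameter layout: clock id in bits 31:16,
+ * frequency (MHz) in bits 15:0. A zero min or max leaves that
+ * bound untouched.
+ */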
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_14_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_14_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_14_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_14_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_14_0_dpm_table *vclk_table =
+ &dpm_context->dpm_tables.vclk_table;
+ struct smu_14_0_dpm_table *dclk_table =
+ &dpm_context->dpm_tables.dclk_table;
+ struct smu_14_0_dpm_table *fclk_table =
+ &dpm_context->dpm_tables.fclk_table;
+ struct smu_umd_pstate_table *pstate_table =
+ &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t mclk_min = 0, mclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
+ uint32_t fclk_min = 0, fclk_max = 0;
+ int ret = 0, i;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ sclk_min = sclk_max = gfx_table->max;
+ mclk_min = mclk_max = mem_table->max;
+ socclk_min = socclk_max = soc_table->max;
+ vclk_min = vclk_max = vclk_table->max;
+ dclk_min = dclk_max = dclk_table->max;
+ fclk_min = fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ sclk_min = sclk_max = gfx_table->min;
+ mclk_min = mclk_max = mem_table->min;
+ socclk_min = socclk_max = soc_table->min;
+ vclk_min = vclk_max = vclk_table->min;
+ dclk_min = dclk_max = dclk_table->min;
+ fclk_min = fclk_max = fclk_table->min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ sclk_min = gfx_table->min;
+ sclk_max = gfx_table->max;
+ mclk_min = mem_table->min;
+ mclk_max = mem_table->max;
+ socclk_min = soc_table->min;
+ socclk_max = soc_table->max;
+ vclk_min = vclk_table->min;
+ vclk_max = vclk_table->max;
+ dclk_min = dclk_table->min;
+ dclk_max = dclk_table->max;
+ fclk_min = fclk_table->min;
+ fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
+ default:
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = sclk_min;
+ pstate_table->gfxclk_pstate.curr.max = sclk_max;
+ }
+
+ if (mclk_min && mclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_MCLK,
+ mclk_min,
+ mclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->uclk_pstate.curr.min = mclk_min;
+ pstate_table->uclk_pstate.curr.max = mclk_max;
+ }
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->socclk_pstate.curr.min = socclk_min;
+ pstate_table->socclk_pstate.curr.max = socclk_max;
+ }
+
+ if (vclk_min && vclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_VCLK1 : SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->vclk_pstate.curr.min = vclk_min;
+ pstate_table->vclk_pstate.curr.max = vclk_max;
+ }
+
+ if (dclk_min && dclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_DCLK1 : SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->dclk_pstate.curr.min = dclk_min;
+ pstate_table->dclk_pstate.curr.max = dclk_max;
+ }
+
+ if (fclk_min && fclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_FCLK,
+ fclk_min,
+ fclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->fclk_pstate.curr.min = fclk_min;
+ pstate_table->fclk_pstate.curr.max = fclk_max;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
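+/*
+ * GetDpmFreqByIndex parameter: clock id in bits 31:16, level index
+ * in bits 15:0 (0xff selects the level count). Bit 31 of the reply
+ * flags fine-grained DPM and is masked off the returned frequency.
+ */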
+static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint16_t level,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ value);
+ if (ret)
+ return ret;
+
+ *value = *value & 0x7fffffff;
+
+ return ret;
+}
+
+static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ int ret;
+
+ ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+
+ return ret;
+}
+
+static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ bool *is_fine_grained_dpm)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+ uint32_t value;
+
+ if (!is_fine_grained_dpm)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ &value);
+ if (ret)
+ return ret;
+
+ /*
+ * BIT31: 1 - fine-grained DPM, 0 - discrete DPM.
+ * Fine-grained DPM is not supported for now.
+ */
+ *is_fine_grained_dpm = value & 0x80000000;
+
+ return 0;
+}
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table)
+{
+ int ret = 0;
+ uint32_t clk;
+ int i;
+
+ ret = smu_v14_0_get_dpm_level_count(smu,
+ clk_type,
+ &single_dpm_table->count);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ ret = smu_v14_0_get_fine_grained_status(smu,
+ clk_type,
+ &single_dpm_table->is_fine_grained);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++) {
+ ret = smu_v14_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ i,
+ &clk);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+
+ if (i == 0)
+ single_dpm_table->min = clk;
+ else if (i == single_dpm_table->count - 1)
+ single_dpm_table->max = clk;
+ }
+
+ return 0;
+}
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ i << 16U, NULL);
+ else if (i == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
+ if (i == 0)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
+ i << 16U, NULL);
+ else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
+ i << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ i << 16U, NULL);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_run_btc(struct smu_context *smu)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ if (res)
+ dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+
+ return res;
+}
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_AllowGpo,
+ enablement ? 1 : 0,
+ NULL);
+ if (res)
+ dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+ return res;
+}
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
+
+ return ret;
+}
+
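+/*
+ * BACO (Bus Active, Chip Off) powers the chip down while keeping
+ * the PCIe bus active; BAMACO additionally keeps memory alive.
+ * ArmD3 pre-arms the requested sequence ahead of a runtime suspend.
+ */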
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (baco_seq == BACO_SEQ_BAMACO ||
+ baco_seq == BACO_SEQ_BACO)
+ smu_baco->state = SMU_BACO_STATE_ENTER;
+ else
+ smu_baco->state = SMU_BACO_STATE_EXIT;
+
+ return 0;
+}
+
+int smu_v14_0_get_bamaco_support(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int bamaco_support = 0;
+
+ if (amdgpu_sriov_vf(smu->adev) ||
+ !smu_baco->platform_support)
+ return 0;
+
+ if (smu_baco->maco_support)
+ bamaco_support |= MACO_SUPPORT;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return (bamaco_support |= BACO_SUPPORT);
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return 0;
+
+ return (bamaco_support |= BACO_SUPPORT);
+}
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+
+ return smu_baco->state;
+}
+
+int smu_v14_0_baco_set_state(struct smu_context *smu,
+ enum smu_baco_state state)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_v14_0_baco_get_state(smu) == state)
+ return 0;
+
+ if (state == SMU_BACO_STATE_ENTER) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnterBaco,
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ExitBaco,
+ NULL);
+ if (ret)
+ return ret;
+
+ /* clear vbios scratch 6 and 7 for coming asic reinit */
+ WREG32(adev->bios_scratch_reg_offset + 6, 0);
+ WREG32(adev->bios_scratch_reg_offset + 7, 0);
+ }
+
+ if (!ret)
+ smu_baco->state = state;
+
+ return ret;
+}
+
+int smu_v14_0_baco_enter(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_ENTER);
+ if (ret)
+ return ret;
+
+ msleep(10);
+
+ return ret;
+}
+
+int smu_v14_0_baco_exit(struct smu_context *smu)
+{
+ return smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_EXIT);
+}
+
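+/*
+ * With PSP-managed firmware loading the EnableGfxImu message is
+ * sent synchronously; otherwise it is fired without waiting for a
+ * response.
+ */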
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ uint16_t index;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
+ }
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+ return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
+}
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
+ smu_table->clocks_table, false);
+}
+
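+/*
+ * Apply overdrive edits to the sclk limits. Only allowed at the manual
+ * performance level; a commit pushes the hard min and soft max to the
+ * SMU after validating that min does not exceed max.
+ */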
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ int ret = 0;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+			dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+					"Requested fine-grained minimum sclk (%ld MHz) is below the minimum allowed (%u MHz)\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+					"Requested fine-grained maximum sclk (%ld MHz) is above the maximum allowed (%u MHz)\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+			dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+			dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+				"Requested minimum sclk (%u MHz) is greater than the requested maximum sclk (%u MHz)\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+			dev_err(smu->adev->dev, "Set hard min sclk failed!\n");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+			dev_err(smu->adev->dev, "Set soft max sclk failed!\n");
+ return ret;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
diff --git a/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/postimage b/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/postimage
new file mode 100644
index 000000000000..1fc4557e6fb4
--- /dev/null
+++ b/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/postimage
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_H__
+#define __SMU_V14_0_H__
+
+#include "amdgpu_smu.h"
+
+#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP1_PUB_CTRL 0x3010d10
+
+#define MAX_DPM_LEVELS 16
+#define MAX_PCIE_CONF 3
+
+struct smu_14_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_14_0_dpm_clk_level {
+ bool enabled;
+ uint32_t value;
+};
+
+struct smu_14_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+ uint32_t count;
+ bool is_fine_grained;
+ struct smu_14_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
+};
+
+struct smu_14_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint16_t clk_freq[MAX_PCIE_CONF];
+ uint32_t num_of_link_levels;
+};
+
+struct smu_14_0_dpm_tables {
+ struct smu_14_0_dpm_table soc_table;
+ struct smu_14_0_dpm_table gfx_table;
+ struct smu_14_0_dpm_table uclk_table;
+ struct smu_14_0_dpm_table eclk_table;
+ struct smu_14_0_dpm_table vclk_table;
+ struct smu_14_0_dpm_table dclk_table;
+ struct smu_14_0_dpm_table dcef_table;
+ struct smu_14_0_dpm_table pixel_table;
+ struct smu_14_0_dpm_table display_table;
+ struct smu_14_0_dpm_table phy_table;
+ struct smu_14_0_dpm_table fclk_table;
+ struct smu_14_0_pcie_table pcie_table;
+};
+
+struct smu_14_0_dpm_context {
+ struct smu_14_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_14_0_power_state {
+ SMU_14_0_POWER_STATE__D0 = 0,
+ SMU_14_0_POWER_STATE__D1,
+	SMU_14_0_POWER_STATE__D3, /* Sleep */
+	SMU_14_0_POWER_STATE__D4, /* Hibernate */
+	SMU_14_0_POWER_STATE__D5, /* Power off */
+};
+
+struct smu_14_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_14_0_power_state power_state;
+};
+
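+/* The helpers below are exported only to swSMU L2/L3 code layers. */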
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
+
+int smu_v14_0_init_microcode(struct smu_context *smu);
+
+void smu_v14_0_fini_microcode(struct smu_context *smu);
+
+int smu_v14_0_load_microcode(struct smu_context *smu);
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_init_power(struct smu_context *smu);
+
+int smu_v14_0_fini_power(struct smu_context *smu);
+
+int smu_v14_0_check_fw_status(struct smu_context *smu);
+
+int smu_v14_0_setup_pptable(struct smu_context *smu);
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v14_0_check_fw_version(struct smu_context *smu);
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu);
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v14_0_notify_display_change(struct smu_context *smu);
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit);
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
+int smu_v14_0_get_bamaco_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu);
+
+int smu_v14_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v14_0_baco_enter(struct smu_context *smu);
+int smu_v14_0_baco_exit(struct smu_context *smu);
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max);
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table);
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg);
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu);
+
+int smu_v14_0_run_btc(struct smu_context *smu);
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu);
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu);
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size);
+
+void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu);
+
+#endif
+#endif
diff --git a/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage b/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage
new file mode 100644
index 000000000000..0da88b2ffcfc
--- /dev/null
+++ b/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_H__
+#define __SMU_V14_0_H__
+
+#include "amdgpu_smu.h"
+
+#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+<<<<<<<
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
+=======
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25
+>>>>>>>
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP1_PUB_CTRL 0x3010d10
+
+#define MAX_DPM_LEVELS 16
+#define MAX_PCIE_CONF 3
+
+struct smu_14_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_14_0_dpm_clk_level {
+ bool enabled;
+ uint32_t value;
+};
+
+struct smu_14_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+ uint32_t count;
+ bool is_fine_grained;
+ struct smu_14_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
+};
+
+struct smu_14_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint16_t clk_freq[MAX_PCIE_CONF];
+ uint32_t num_of_link_levels;
+};
+
+struct smu_14_0_dpm_tables {
+ struct smu_14_0_dpm_table soc_table;
+ struct smu_14_0_dpm_table gfx_table;
+ struct smu_14_0_dpm_table uclk_table;
+ struct smu_14_0_dpm_table eclk_table;
+ struct smu_14_0_dpm_table vclk_table;
+ struct smu_14_0_dpm_table dclk_table;
+ struct smu_14_0_dpm_table dcef_table;
+ struct smu_14_0_dpm_table pixel_table;
+ struct smu_14_0_dpm_table display_table;
+ struct smu_14_0_dpm_table phy_table;
+ struct smu_14_0_dpm_table fclk_table;
+ struct smu_14_0_pcie_table pcie_table;
+};
+
+struct smu_14_0_dpm_context {
+ struct smu_14_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_14_0_power_state {
+ SMU_14_0_POWER_STATE__D0 = 0,
+ SMU_14_0_POWER_STATE__D1,
+	SMU_14_0_POWER_STATE__D3, /* Sleep */
+	SMU_14_0_POWER_STATE__D4, /* Hibernate */
+	SMU_14_0_POWER_STATE__D5, /* Power off */
+};
+
+struct smu_14_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_14_0_power_state power_state;
+};
+
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
+
+int smu_v14_0_init_microcode(struct smu_context *smu);
+
+void smu_v14_0_fini_microcode(struct smu_context *smu);
+
+int smu_v14_0_load_microcode(struct smu_context *smu);
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_init_power(struct smu_context *smu);
+
+int smu_v14_0_fini_power(struct smu_context *smu);
+
+int smu_v14_0_check_fw_status(struct smu_context *smu);
+
+int smu_v14_0_setup_pptable(struct smu_context *smu);
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v14_0_check_fw_version(struct smu_context *smu);
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu);
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v14_0_notify_display_change(struct smu_context *smu);
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit);
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
+int smu_v14_0_get_bamaco_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu);
+
+int smu_v14_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v14_0_baco_enter(struct smu_context *smu);
+int smu_v14_0_baco_exit(struct smu_context *smu);
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max);
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table);
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg);
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu);
+
+int smu_v14_0_run_btc(struct smu_context *smu);
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu);
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu);
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size);
+
+void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu);
+
+#endif
+#endif
diff --git a/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage.1 b/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage.1
new file mode 100644
index 000000000000..0da88b2ffcfc
--- /dev/null
+++ b/rr-cache/a4124209284161eea99191ee66dcfecb7c616ddb/preimage.1
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_H__
+#define __SMU_V14_0_H__
+
+#include "amdgpu_smu.h"
+
+#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
+<<<<<<<
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
+=======
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25
+>>>>>>>
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP1_PUB_CTRL 0x3010d10
+
+#define MAX_DPM_LEVELS 16
+#define MAX_PCIE_CONF 3
+
+struct smu_14_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_14_0_dpm_clk_level {
+ bool enabled;
+ uint32_t value;
+};
+
+struct smu_14_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+ uint32_t count;
+ bool is_fine_grained;
+ struct smu_14_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
+};
+
+struct smu_14_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint16_t clk_freq[MAX_PCIE_CONF];
+ uint32_t num_of_link_levels;
+};
+
+struct smu_14_0_dpm_tables {
+ struct smu_14_0_dpm_table soc_table;
+ struct smu_14_0_dpm_table gfx_table;
+ struct smu_14_0_dpm_table uclk_table;
+ struct smu_14_0_dpm_table eclk_table;
+ struct smu_14_0_dpm_table vclk_table;
+ struct smu_14_0_dpm_table dclk_table;
+ struct smu_14_0_dpm_table dcef_table;
+ struct smu_14_0_dpm_table pixel_table;
+ struct smu_14_0_dpm_table display_table;
+ struct smu_14_0_dpm_table phy_table;
+ struct smu_14_0_dpm_table fclk_table;
+ struct smu_14_0_pcie_table pcie_table;
+};
+
+struct smu_14_0_dpm_context {
+ struct smu_14_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_14_0_power_state {
+ SMU_14_0_POWER_STATE__D0 = 0,
+ SMU_14_0_POWER_STATE__D1,
+	SMU_14_0_POWER_STATE__D3, /* Sleep */
+	SMU_14_0_POWER_STATE__D4, /* Hibernate */
+	SMU_14_0_POWER_STATE__D5, /* Power off */
+};
+
+struct smu_14_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_14_0_power_state power_state;
+};
+
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
+
+int smu_v14_0_init_microcode(struct smu_context *smu);
+
+void smu_v14_0_fini_microcode(struct smu_context *smu);
+
+int smu_v14_0_load_microcode(struct smu_context *smu);
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_init_power(struct smu_context *smu);
+
+int smu_v14_0_fini_power(struct smu_context *smu);
+
+int smu_v14_0_check_fw_status(struct smu_context *smu);
+
+int smu_v14_0_setup_pptable(struct smu_context *smu);
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v14_0_check_fw_version(struct smu_context *smu);
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu);
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v14_0_notify_display_change(struct smu_context *smu);
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit);
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
+int smu_v14_0_get_bamaco_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu);
+
+int smu_v14_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v14_0_baco_enter(struct smu_context *smu);
+int smu_v14_0_baco_exit(struct smu_context *smu);
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max);
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table);
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg);
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu);
+
+int smu_v14_0_run_btc(struct smu_context *smu);
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu);
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu);
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size);
+
+void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu);
+
+#endif
+#endif