Diffstat (limited to 'drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c  80
1 file changed, 44 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 1fcd4451001f..632a25957477 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct smu7_hwmgr *data;
int result = 0;
@@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* Initalize Dynamic State Adjustment Rule Settings */
result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (0 == result) {
- struct amdgpu_device *adev = hwmgr->adev;
+ if (result)
+ goto fail;
- data->is_tlu_enabled = false;
+ data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
SMU7_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- data->pcie_gen_cap = adev->pm.pcie_gen_mask;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- else
- data->pcie_spc_cap = 16;
- data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
-
- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
- hwmgr->platform_descriptor.clockStep.engineClock = 500;
- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
- smu7_thermal_parameter_init(hwmgr);
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- smu7_hwmgr_backend_fini(hwmgr);
- }
+ data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ data->pcie_spc_cap = 20;
+ else
+ data->pcie_spc_cap = 16;
+ data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+ smu7_thermal_parameter_init(hwmgr);
result = smu7_update_edc_leakage_table(hwmgr);
- if (result) {
- smu7_hwmgr_backend_fini(hwmgr);
- return result;
- }
+ if (result)
+ goto fail;
return 0;
+fail:
+ smu7_hwmgr_backend_fini(hwmgr);
+ return result;
}
static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
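The first hunk reworks smu7_hwmgr_backend_init() so that every failure funnels through a single fail label. In the old code, a failure of phm_initializa_dynamic_state_adjustment_rule_settings() called smu7_hwmgr_backend_fini() but then fell through and still ran smu7_update_edc_leakage_table(), overwriting the error; the goto form cleans up once and returns the original error code. A minimal sketch of the idiom, with hypothetical step_one()/step_two() helpers standing in for the real calls:

	static int step_one(struct pp_hwmgr *hwmgr);	/* hypothetical stand-in */
	static int step_two(struct pp_hwmgr *hwmgr);	/* hypothetical stand-in */

	static int backend_init_sketch(struct pp_hwmgr *hwmgr)
	{
		int result;

		result = step_one(hwmgr);
		if (result)
			goto fail;

		result = step_two(hwmgr);
		if (result)
			goto fail;

		return 0;
	fail:
		/* one cleanup path, so no error can leak past it */
		smu7_hwmgr_backend_fini(hwmgr);
		return result;
	}

Centralizing the unwind also removes the asymmetry of the old version, where one error path ran cleanup and kept going while the other cleaned up and returned.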
@@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
const struct pp_power_state *current_ps)
{
struct amdgpu_device *adev = hwmgr->adev;
- struct smu7_power_state *smu7_ps =
- cast_phw_smu7_power_state(&request_ps->hardware);
+ struct smu7_power_state *smu7_ps;
uint32_t sclk;
uint32_t mclk;
struct PP_Clocks minimum_clocks = {0};
@@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
uint32_t latency;
bool latency_allowed = false;
+ smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
+ if (!smu7_ps)
+ return -EINVAL;
+
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
data->mclk_ignore_signal = false;
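The smu7_apply_state_adjust_rules() hunk moves the cast_phw_smu7_power_state() call out of the declaration so its result can be checked before the first dereference. A minimal validate-then-use sketch, assuming (as the new code does) that the cast helper returns NULL when the hardware state is not a smu7 state:

	struct smu7_power_state *smu7_ps;

	smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
	if (!smu7_ps)
		return -EINVAL;	/* never touch fields of a failed cast */

Initializing the pointer at its declaration, as the old code did, left no point at which a NULL result could be rejected.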
@@ -4000,6 +4001,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct amdgpu_device *adev = hwmgr->adev;
+ int ret = 0;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -4007,12 +4009,16 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
+ if (ret)
+ return ret;
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
+ if (ret)
+ return ret;
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
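The smu7_read_sensor() hunks stop discarding the return value of smum_send_msg_to_smc(), and the smu7_print_clock_levels() hunks below apply the same fix. The shape of the pattern, sketched with the message ID from the diff; the key point is that the out-parameter is only meaningful when the send succeeds:

	uint32_t sclk;
	int ret;

	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
	if (ret)
		return ret;	/* propagate the SMC failure instead of reporting a stale value */

	*((uint32_t *)value) = sclk;
	*size = 4;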
@@ -4965,13 +4971,14 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
- int size = 0;
+ int size = 0, ret = 0;
uint32_t i, now, clock, pcie_speed;
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
-
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
+ if (ret)
+ return ret;
for (i = 0; i < sclk_table->count; i++) {
if (clock > sclk_table->dpm_levels[i].value)
continue;
@@ -4985,8 +4992,9 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
-
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
+ if (ret)
+ return ret;
for (i = 0; i < mclk_table->count; i++) {
if (clock > mclk_table->dpm_levels[i].value)
continue;
@@ -5640,7 +5648,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
- if (size < 8 && size != 0)
+ if (size != 8 && size != 0)
return -EINVAL;
/* If only CUSTOM is passed in, use the saved values. Check
* that we actually have a CUSTOM profile by ensuring that