Path: blob/master/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
51609 views
/*1* Copyright 2019 Advanced Micro Devices, Inc.2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice shall be included in11* all copies or substantial portions of the Software.12*13* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR14* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,15* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL16* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR17* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,18* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR19* OTHER DEALINGS IN THE SOFTWARE.20*/2122#define SWSMU_CODE_LAYER_L12324#include <linux/firmware.h>25#include <linux/pci.h>26#include <linux/power_supply.h>27#include <linux/reboot.h>2829#include "amdgpu.h"30#include "amdgpu_smu.h"31#include "smu_internal.h"32#include "atom.h"33#include "arcturus_ppt.h"34#include "navi10_ppt.h"35#include "sienna_cichlid_ppt.h"36#include "renoir_ppt.h"37#include "vangogh_ppt.h"38#include "aldebaran_ppt.h"39#include "yellow_carp_ppt.h"40#include "cyan_skillfish_ppt.h"41#include "smu_v13_0_0_ppt.h"42#include "smu_v13_0_4_ppt.h"43#include "smu_v13_0_5_ppt.h"44#include "smu_v13_0_6_ppt.h"45#include "smu_v13_0_7_ppt.h"46#include "smu_v14_0_0_ppt.h"47#include "smu_v14_0_2_ppt.h"48#include "smu_v15_0_0_ppt.h"49#include "amd_pcie.h"5051/*52* DO NOT use these for err/warn/info/debug messages.53* Use dev_err, dev_warn, dev_info and dev_dbg instead.54* They are more MGPU friendly.55*/56#undef pr_err57#undef pr_warn58#undef pr_info59#undef pr_debug6061static const struct amd_pm_funcs swsmu_pm_funcs;62static int smu_force_smuclk_levels(struct smu_context *smu,63enum smu_clk_type clk_type,64uint32_t mask);65static int smu_handle_task(struct smu_context *smu,66enum amd_dpm_forced_level level,67enum amd_pp_task task_id);68static int smu_reset(struct smu_context *smu);69static int smu_set_fan_speed_pwm(void *handle, u32 speed);70static int smu_set_fan_control_mode(void *handle, u32 value);71static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);72static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);73static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);74static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);75static void smu_power_profile_mode_get(struct smu_context *smu,76enum PP_SMC_POWER_PROFILE profile_mode);77static void smu_power_profile_mode_put(struct smu_context *smu,78enum PP_SMC_POWER_PROFILE profile_mode);79static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);80static int smu_od_edit_dpm_table(void *handle,81enum PP_OD_DPM_TABLE_COMMAND type,82long *input, uint32_t size);8384static int smu_sys_get_pp_feature_mask(void *handle,85char *buf)86{87struct smu_context *smu = handle;8889if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)90return -EOPNOTSUPP;9192return smu_get_pp_feature_mask(smu, buf);93}9495static int smu_sys_set_pp_feature_mask(void *handle,96uint64_t new_mask)97{98struct smu_context *smu = handle;99100if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)101return -EOPNOTSUPP;102103return smu_set_pp_feature_mask(smu, new_mask);104}105106int smu_set_residency_gfxoff(struct smu_context *smu, bool value)107{108if (!smu->ppt_funcs->set_gfx_off_residency)109return -EINVAL;110111return smu_set_gfx_off_residency(smu, value);112}113114int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)115{116if (!smu->ppt_funcs->get_gfx_off_residency)117return -EINVAL;118119return smu_get_gfx_off_residency(smu, value);120}121122int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)123{124if (!smu->ppt_funcs->get_gfx_off_entrycount)125return -EINVAL;126127return smu_get_gfx_off_entrycount(smu, value);128}129130int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)131{132if (!smu->ppt_funcs->get_gfx_off_status)133return -EINVAL;134135*value = smu_get_gfx_off_status(smu);136137return 0;138}139140int smu_set_soft_freq_range(struct smu_context *smu,141enum pp_clock_type type,142uint32_t min,143uint32_t max)144{145enum smu_clk_type clk_type;146int ret = 0;147148clk_type = smu_convert_to_smuclk(type);149if (clk_type == SMU_CLK_COUNT)150return -EINVAL;151152if (smu->ppt_funcs->set_soft_freq_limited_range)153ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,154clk_type,155min,156max,157false);158159return ret;160}161162int smu_get_dpm_freq_range(struct smu_context *smu,163enum smu_clk_type clk_type,164uint32_t *min,165uint32_t *max)166{167int ret = -ENOTSUPP;168169if (!min && !max)170return -EINVAL;171172if (smu->ppt_funcs->get_dpm_ultimate_freq)173ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,174clk_type,175min,176max);177178return ret;179}180181int smu_set_gfx_power_up_by_imu(struct smu_context *smu)182{183int ret = 0;184struct amdgpu_device *adev = smu->adev;185186if (smu->ppt_funcs->set_gfx_power_up_by_imu) {187ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);188if (ret)189dev_err(adev->dev, "Failed to enable gfx imu!\n");190}191return ret;192}193194static u32 smu_get_mclk(void *handle, bool low)195{196struct smu_context *smu = handle;197uint32_t clk_freq;198int ret = 0;199200ret = smu_get_dpm_freq_range(smu, SMU_UCLK,201low ? &clk_freq : NULL,202!low ? &clk_freq : NULL);203if (ret)204return 0;205return clk_freq * 100;206}207208static u32 smu_get_sclk(void *handle, bool low)209{210struct smu_context *smu = handle;211uint32_t clk_freq;212int ret = 0;213214ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,215low ? &clk_freq : NULL,216!low ? &clk_freq : NULL);217if (ret)218return 0;219return clk_freq * 100;220}221222static int smu_set_gfx_imu_enable(struct smu_context *smu)223{224struct amdgpu_device *adev = smu->adev;225226if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)227return 0;228229if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)230return 0;231232return smu_set_gfx_power_up_by_imu(smu);233}234235static bool is_vcn_enabled(struct amdgpu_device *adev)236{237int i;238239for (i = 0; i < adev->num_ip_blocks; i++) {240if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||241adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&242!adev->ip_blocks[i].status.valid)243return false;244}245246return true;247}248249static int smu_dpm_set_vcn_enable(struct smu_context *smu,250bool enable,251int inst)252{253struct smu_power_context *smu_power = &smu->smu_power;254struct smu_power_gate *power_gate = &smu_power->power_gate;255int ret = 0;256257/*258* don't poweron vcn/jpeg when they are skipped.259*/260if (!is_vcn_enabled(smu->adev))261return 0;262263if (!smu->ppt_funcs->dpm_set_vcn_enable)264return 0;265266if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)267return 0;268269ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);270if (!ret)271atomic_set(&power_gate->vcn_gated[inst], !enable);272273return ret;274}275276static int smu_dpm_set_jpeg_enable(struct smu_context *smu,277bool enable)278{279struct smu_power_context *smu_power = &smu->smu_power;280struct smu_power_gate *power_gate = &smu_power->power_gate;281int ret = 0;282283if (!is_vcn_enabled(smu->adev))284return 0;285286if (!smu->ppt_funcs->dpm_set_jpeg_enable)287return 0;288289if (atomic_read(&power_gate->jpeg_gated) ^ enable)290return 0;291292ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);293if (!ret)294atomic_set(&power_gate->jpeg_gated, !enable);295296return ret;297}298299static int smu_dpm_set_vpe_enable(struct smu_context *smu,300bool enable)301{302struct smu_power_context *smu_power = &smu->smu_power;303struct smu_power_gate *power_gate = &smu_power->power_gate;304int ret = 0;305306if (!smu->ppt_funcs->dpm_set_vpe_enable)307return 0;308309if (atomic_read(&power_gate->vpe_gated) ^ enable)310return 0;311312ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);313if (!ret)314atomic_set(&power_gate->vpe_gated, !enable);315316return ret;317}318319static int smu_dpm_set_isp_enable(struct smu_context *smu,320bool enable)321{322struct smu_power_context *smu_power = &smu->smu_power;323struct smu_power_gate *power_gate = &smu_power->power_gate;324int ret;325326if (!smu->ppt_funcs->dpm_set_isp_enable)327return 0;328329if (atomic_read(&power_gate->isp_gated) ^ enable)330return 0;331332ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);333if (!ret)334atomic_set(&power_gate->isp_gated, !enable);335336return ret;337}338339static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,340bool enable)341{342struct smu_power_context *smu_power = &smu->smu_power;343struct smu_power_gate *power_gate = &smu_power->power_gate;344int ret = 0;345346if (!smu->adev->enable_umsch_mm)347return 0;348349if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)350return 0;351352if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)353return 0;354355ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);356if (!ret)357atomic_set(&power_gate->umsch_mm_gated, !enable);358359return ret;360}361362static int smu_set_mall_enable(struct smu_context *smu)363{364int ret = 0;365366if (!smu->ppt_funcs->set_mall_enable)367return 0;368369ret = smu->ppt_funcs->set_mall_enable(smu);370371return ret;372}373374/**375* smu_dpm_set_power_gate - power gate/ungate the specific IP block376*377* @handle: smu_context pointer378* @block_type: the IP block to power gate/ungate379* @gate: to power gate if true, ungate otherwise380* @inst: the instance of the IP block to power gate/ungate381*382* This API uses no smu->mutex lock protection due to:383* 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce).384* This is guarded to be race condition free by the caller.385* 2. Or get called on user setting request of power_dpm_force_performance_level.386* Under this case, the smu->mutex lock protection is already enforced on387* the parent API smu_force_performance_level of the call path.388*/389static int smu_dpm_set_power_gate(void *handle,390uint32_t block_type,391bool gate,392int inst)393{394struct smu_context *smu = handle;395int ret = 0;396397if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {398dev_WARN(smu->adev->dev,399"SMU uninitialized but power %s requested for %u!\n",400gate ? "gate" : "ungate", block_type);401return -EOPNOTSUPP;402}403404switch (block_type) {405/*406* Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses407* AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.408*/409case AMD_IP_BLOCK_TYPE_UVD:410case AMD_IP_BLOCK_TYPE_VCN:411ret = smu_dpm_set_vcn_enable(smu, !gate, inst);412if (ret)413dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",414gate ? "gate" : "ungate", inst);415break;416case AMD_IP_BLOCK_TYPE_GFX:417ret = smu_gfx_off_control(smu, gate);418if (ret)419dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",420gate ? "enable" : "disable");421break;422case AMD_IP_BLOCK_TYPE_SDMA:423ret = smu_powergate_sdma(smu, gate);424if (ret)425dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",426gate ? "gate" : "ungate");427break;428case AMD_IP_BLOCK_TYPE_JPEG:429ret = smu_dpm_set_jpeg_enable(smu, !gate);430if (ret)431dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",432gate ? "gate" : "ungate");433break;434case AMD_IP_BLOCK_TYPE_VPE:435ret = smu_dpm_set_vpe_enable(smu, !gate);436if (ret)437dev_err(smu->adev->dev, "Failed to power %s VPE!\n",438gate ? "gate" : "ungate");439break;440case AMD_IP_BLOCK_TYPE_ISP:441ret = smu_dpm_set_isp_enable(smu, !gate);442if (ret)443dev_err(smu->adev->dev, "Failed to power %s ISP!\n",444gate ? "gate" : "ungate");445break;446default:447dev_err(smu->adev->dev, "Unsupported block type!\n");448return -EINVAL;449}450451return ret;452}453454/**455* smu_set_user_clk_dependencies - set user profile clock dependencies456*457* @smu: smu_context pointer458* @clk: enum smu_clk_type type459*460* Enable/Disable the clock dependency for the @clk type.461*/462static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)463{464if (smu->adev->in_suspend)465return;466467if (clk == SMU_MCLK) {468smu->user_dpm_profile.clk_dependency = 0;469smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);470} else if (clk == SMU_FCLK) {471/* MCLK takes precedence over FCLK */472if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))473return;474475smu->user_dpm_profile.clk_dependency = 0;476smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);477} else if (clk == SMU_SOCCLK) {478/* MCLK takes precedence over SOCCLK */479if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))480return;481482smu->user_dpm_profile.clk_dependency = 0;483smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);484} else485/* Add clk dependencies here, if any */486return;487}488489/**490* smu_restore_dpm_user_profile - reinstate user dpm profile491*492* @smu: smu_context pointer493*494* Restore the saved user power configurations include power limit,495* clock frequencies, fan control mode and fan speed.496*/497static void smu_restore_dpm_user_profile(struct smu_context *smu)498{499struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);500int ret = 0;501502if (!smu->adev->in_suspend)503return;504505if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)506return;507508/* Enable restore flag */509smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;510511/* set the user dpm power limits */512for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {513if (!smu->user_dpm_profile.power_limits[i])514continue;515ret = smu_set_power_limit(smu, i,516smu->user_dpm_profile.power_limits[i]);517if (ret)518dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i);519}520521/* set the user dpm clock configurations */522if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {523enum smu_clk_type clk_type;524525for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {526/*527* Iterate over smu clk type and force the saved user clk528* configs, skip if clock dependency is enabled529*/530if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&531smu->user_dpm_profile.clk_mask[clk_type]) {532ret = smu_force_smuclk_levels(smu, clk_type,533smu->user_dpm_profile.clk_mask[clk_type]);534if (ret)535dev_err(smu->adev->dev,536"Failed to set clock type = %d\n", clk_type);537}538}539}540541/* set the user dpm fan configurations */542if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||543smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {544ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);545if (ret != -EOPNOTSUPP) {546smu->user_dpm_profile.fan_speed_pwm = 0;547smu->user_dpm_profile.fan_speed_rpm = 0;548smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;549dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");550}551552if (smu->user_dpm_profile.fan_speed_pwm) {553ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);554if (ret != -EOPNOTSUPP)555dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");556}557558if (smu->user_dpm_profile.fan_speed_rpm) {559ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);560if (ret != -EOPNOTSUPP)561dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");562}563}564565/* Restore user customized OD settings */566if (smu->user_dpm_profile.user_od) {567if (smu->ppt_funcs->restore_user_od_settings) {568ret = smu->ppt_funcs->restore_user_od_settings(smu);569if (ret)570dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");571}572}573574/* Disable restore flag */575smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;576}577578static int smu_get_power_num_states(void *handle,579struct pp_states_info *state_info)580{581if (!state_info)582return -EINVAL;583584/* not support power state */585memset(state_info, 0, sizeof(struct pp_states_info));586state_info->nums = 1;587state_info->states[0] = POWER_STATE_TYPE_DEFAULT;588589return 0;590}591592bool is_support_sw_smu(struct amdgpu_device *adev)593{594/* vega20 is 11.0.2, but it's supported via the powerplay code */595if (adev->asic_type == CHIP_VEGA20)596return false;597598if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&599amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))600return true;601602return false;603}604605bool is_support_cclk_dpm(struct amdgpu_device *adev)606{607struct smu_context *smu = adev->powerplay.pp_handle;608609if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))610return false;611612return true;613}614615int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,616uint32_t param, uint32_t *read_arg)617{618struct smu_context *smu = adev->powerplay.pp_handle;619int ret = -EOPNOTSUPP;620621if (!smu)622return ret;623624if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)625ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);626627return ret;628}629630static int smu_sys_get_pp_table(void *handle,631char **table)632{633struct smu_context *smu = handle;634struct smu_table_context *smu_table = &smu->smu_table;635636if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)637return -EOPNOTSUPP;638639if (!smu_table->power_play_table && !smu_table->hardcode_pptable)640return -EOPNOTSUPP;641642if (smu_table->hardcode_pptable)643*table = smu_table->hardcode_pptable;644else645*table = smu_table->power_play_table;646647return smu_table->power_play_table_size;648}649650static int smu_sys_set_pp_table(void *handle,651const char *buf,652size_t size)653{654struct smu_context *smu = handle;655struct smu_table_context *smu_table = &smu->smu_table;656ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;657int ret = 0;658659if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)660return -EOPNOTSUPP;661662if (header->usStructureSize != size) {663dev_err(smu->adev->dev, "pp table size not matched !\n");664return -EIO;665}666667if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {668kfree(smu_table->hardcode_pptable);669smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);670if (!smu_table->hardcode_pptable)671return -ENOMEM;672}673674memcpy(smu_table->hardcode_pptable, buf, size);675smu_table->power_play_table = smu_table->hardcode_pptable;676smu_table->power_play_table_size = size;677678/*679* Special hw_fini action(for Navi1x, the DPMs disablement will be680* skipped) may be needed for custom pptable uploading.681*/682smu->uploading_custom_pp_table = true;683684ret = smu_reset(smu);685if (ret)686dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);687688smu->uploading_custom_pp_table = false;689690return ret;691}692693static int smu_init_driver_allowed_feature_mask(struct smu_context *smu)694{695/*696* With SCPM enabled, the allowed featuremasks setting(via697* PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.698* That means there is no way to let PMFW knows the settings below.699* Thus, we just assume all the features are allowed under700* such scenario.701*/702if (smu->adev->scpm_enabled) {703smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);704return 0;705}706707smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);708709return smu_init_allowed_features(smu);710}711712static int smu_set_funcs(struct amdgpu_device *adev)713{714struct smu_context *smu = adev->powerplay.pp_handle;715716if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)717smu->od_enabled = true;718719switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {720case IP_VERSION(11, 0, 0):721case IP_VERSION(11, 0, 5):722case IP_VERSION(11, 0, 9):723navi10_set_ppt_funcs(smu);724break;725case IP_VERSION(11, 0, 7):726case IP_VERSION(11, 0, 11):727case IP_VERSION(11, 0, 12):728case IP_VERSION(11, 0, 13):729sienna_cichlid_set_ppt_funcs(smu);730break;731case IP_VERSION(12, 0, 0):732case IP_VERSION(12, 0, 1):733renoir_set_ppt_funcs(smu);734break;735case IP_VERSION(11, 5, 0):736case IP_VERSION(11, 5, 2):737vangogh_set_ppt_funcs(smu);738break;739case IP_VERSION(13, 0, 1):740case IP_VERSION(13, 0, 3):741case IP_VERSION(13, 0, 8):742yellow_carp_set_ppt_funcs(smu);743break;744case IP_VERSION(13, 0, 4):745case IP_VERSION(13, 0, 11):746smu_v13_0_4_set_ppt_funcs(smu);747break;748case IP_VERSION(13, 0, 5):749smu_v13_0_5_set_ppt_funcs(smu);750break;751case IP_VERSION(11, 0, 8):752cyan_skillfish_set_ppt_funcs(smu);753break;754case IP_VERSION(11, 0, 2):755adev->pm.pp_feature &= ~PP_GFXOFF_MASK;756arcturus_set_ppt_funcs(smu);757/* OD is not supported on Arcturus */758smu->od_enabled = false;759break;760case IP_VERSION(13, 0, 2):761aldebaran_set_ppt_funcs(smu);762/* Enable pp_od_clk_voltage node */763smu->od_enabled = true;764break;765case IP_VERSION(13, 0, 0):766case IP_VERSION(13, 0, 10):767smu_v13_0_0_set_ppt_funcs(smu);768break;769case IP_VERSION(13, 0, 6):770case IP_VERSION(13, 0, 14):771case IP_VERSION(13, 0, 12):772smu_v13_0_6_set_ppt_funcs(smu);773/* Enable pp_od_clk_voltage node */774smu->od_enabled = true;775break;776case IP_VERSION(13, 0, 7):777smu_v13_0_7_set_ppt_funcs(smu);778break;779case IP_VERSION(14, 0, 0):780case IP_VERSION(14, 0, 1):781case IP_VERSION(14, 0, 4):782case IP_VERSION(14, 0, 5):783smu_v14_0_0_set_ppt_funcs(smu);784break;785case IP_VERSION(14, 0, 2):786case IP_VERSION(14, 0, 3):787smu_v14_0_2_set_ppt_funcs(smu);788break;789case IP_VERSION(15, 0, 0):790smu_v15_0_0_set_ppt_funcs(smu);791break;792default:793return -EINVAL;794}795796return 0;797}798799static int smu_early_init(struct amdgpu_ip_block *ip_block)800{801struct amdgpu_device *adev = ip_block->adev;802struct smu_context *smu;803int r;804805smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);806if (!smu)807return -ENOMEM;808809smu->adev = adev;810smu->pm_enabled = !!amdgpu_dpm;811smu->is_apu = false;812smu->smu_baco.state = SMU_BACO_STATE_NONE;813smu->smu_baco.platform_support = false;814smu->smu_baco.maco_support = false;815smu->user_dpm_profile.fan_mode = -1;816smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;817818adev->powerplay.pp_handle = smu;819adev->powerplay.pp_funcs = &swsmu_pm_funcs;820821r = smu_set_funcs(adev);822if (r)823return r;824return smu_init_microcode(smu);825}826827static int smu_set_default_dpm_table(struct smu_context *smu)828{829struct amdgpu_device *adev = smu->adev;830struct smu_power_context *smu_power = &smu->smu_power;831struct smu_power_gate *power_gate = &smu_power->power_gate;832int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;833int ret = 0;834835if (!smu->ppt_funcs->set_default_dpm_table)836return 0;837838if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {839for (i = 0; i < adev->vcn.num_vcn_inst; i++)840vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);841}842if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)843jpeg_gate = atomic_read(&power_gate->jpeg_gated);844845if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {846for (i = 0; i < adev->vcn.num_vcn_inst; i++) {847ret = smu_dpm_set_vcn_enable(smu, true, i);848if (ret)849return ret;850}851}852853if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {854ret = smu_dpm_set_jpeg_enable(smu, true);855if (ret)856goto err_out;857}858859ret = smu->ppt_funcs->set_default_dpm_table(smu);860if (ret)861dev_err(smu->adev->dev,862"Failed to setup default dpm clock tables!\n");863864if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)865smu_dpm_set_jpeg_enable(smu, !jpeg_gate);866err_out:867if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {868for (i = 0; i < adev->vcn.num_vcn_inst; i++)869smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);870}871872return ret;873}874875static int smu_apply_default_config_table_settings(struct smu_context *smu)876{877struct amdgpu_device *adev = smu->adev;878int ret = 0;879880ret = smu_get_default_config_table_settings(smu,881&adev->pm.config_table);882if (ret)883return ret;884885return smu_set_config_table(smu, &adev->pm.config_table);886}887888static int smu_late_init(struct amdgpu_ip_block *ip_block)889{890struct amdgpu_device *adev = ip_block->adev;891struct smu_context *smu = adev->powerplay.pp_handle;892int ret = 0;893894smu_set_fine_grain_gfx_freq_parameters(smu);895896if (!smu->pm_enabled)897return 0;898899ret = smu_post_init(smu);900if (ret) {901dev_err(adev->dev, "Failed to post smu init!\n");902return ret;903}904905/*906* Explicitly notify PMFW the power mode the system in. Since907* the PMFW may boot the ASIC with a different mode.908* For those supporting ACDC switch via gpio, PMFW will909* handle the switch automatically. Driver involvement910* is unnecessary.911*/912adev->pm.ac_power = power_supply_is_system_supplied() > 0;913smu_set_ac_dc(smu);914915if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||916(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))917return 0;918919if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {920ret = smu_set_default_od_settings(smu);921if (ret) {922dev_err(adev->dev, "Failed to setup default OD settings!\n");923return ret;924}925}926927ret = smu_populate_umd_state_clk(smu);928if (ret) {929dev_err(adev->dev, "Failed to populate UMD state clocks!\n");930return ret;931}932933ret = smu_get_asic_power_limits(smu,934&smu->current_power_limit,935&smu->default_power_limit,936&smu->max_power_limit,937&smu->min_power_limit);938if (ret) {939dev_err(adev->dev, "Failed to get asic power limits!\n");940return ret;941}942943if (!amdgpu_sriov_vf(adev))944smu_get_unique_id(smu);945946smu_get_fan_parameters(smu);947948smu_handle_task(smu,949smu->smu_dpm.dpm_level,950AMD_PP_TASK_COMPLETE_INIT);951952ret = smu_apply_default_config_table_settings(smu);953if (ret && (ret != -EOPNOTSUPP)) {954dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");955return ret;956}957958smu_restore_dpm_user_profile(smu);959960return 0;961}962963static int smu_init_fb_allocations(struct smu_context *smu)964{965struct amdgpu_device *adev = smu->adev;966struct smu_table_context *smu_table = &smu->smu_table;967struct smu_table *tables = smu_table->tables;968struct smu_table *driver_table = &(smu_table->driver_table);969uint32_t max_table_size = 0;970int ret, i;971972/* VRAM allocation for tool table */973if (tables[SMU_TABLE_PMSTATUSLOG].size) {974ret = amdgpu_bo_create_kernel(adev,975tables[SMU_TABLE_PMSTATUSLOG].size,976tables[SMU_TABLE_PMSTATUSLOG].align,977tables[SMU_TABLE_PMSTATUSLOG].domain,978&tables[SMU_TABLE_PMSTATUSLOG].bo,979&tables[SMU_TABLE_PMSTATUSLOG].mc_address,980&tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);981if (ret) {982dev_err(adev->dev, "VRAM allocation for tool table failed!\n");983return ret;984}985}986987driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;988/* VRAM allocation for driver table */989for (i = 0; i < SMU_TABLE_COUNT; i++) {990if (tables[i].size == 0)991continue;992993/* If one of the tables has VRAM domain restriction, keep it in994* VRAM995*/996if ((tables[i].domain &997(AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==998AMDGPU_GEM_DOMAIN_VRAM)999driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;10001001if (i == SMU_TABLE_PMSTATUSLOG)1002continue;10031004if (max_table_size < tables[i].size)1005max_table_size = tables[i].size;1006}10071008driver_table->size = max_table_size;1009driver_table->align = PAGE_SIZE;10101011ret = amdgpu_bo_create_kernel(adev,1012driver_table->size,1013driver_table->align,1014driver_table->domain,1015&driver_table->bo,1016&driver_table->mc_address,1017&driver_table->cpu_addr);1018if (ret) {1019dev_err(adev->dev, "VRAM allocation for driver table failed!\n");1020if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)1021amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,1022&tables[SMU_TABLE_PMSTATUSLOG].mc_address,1023&tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);1024}10251026return ret;1027}10281029static int smu_fini_fb_allocations(struct smu_context *smu)1030{1031struct smu_table_context *smu_table = &smu->smu_table;1032struct smu_table *tables = smu_table->tables;1033struct smu_table *driver_table = &(smu_table->driver_table);10341035if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)1036amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,1037&tables[SMU_TABLE_PMSTATUSLOG].mc_address,1038&tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);10391040amdgpu_bo_free_kernel(&driver_table->bo,1041&driver_table->mc_address,1042&driver_table->cpu_addr);10431044return 0;1045}10461047static void smu_update_gpu_addresses(struct smu_context *smu)1048{1049struct smu_table_context *smu_table = &smu->smu_table;1050struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;1051struct smu_table *driver_table = &(smu_table->driver_table);1052struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;10531054if (pm_status_table->bo)1055pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);1056if (driver_table->bo)1057driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);1058if (dummy_read_1_table->bo)1059dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);1060}10611062/**1063* smu_alloc_memory_pool - allocate memory pool in the system memory1064*1065* @smu: amdgpu_device pointer1066*1067* This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr1068* and DramLogSetDramAddr can notify it changed.1069*1070* Returns 0 on success, error on failure.1071*/1072static int smu_alloc_memory_pool(struct smu_context *smu)1073{1074struct amdgpu_device *adev = smu->adev;1075struct smu_table_context *smu_table = &smu->smu_table;1076struct smu_table *memory_pool = &smu_table->memory_pool;1077uint64_t pool_size = smu->pool_size;1078int ret = 0;10791080if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)1081return ret;10821083memory_pool->size = pool_size;1084memory_pool->align = PAGE_SIZE;1085memory_pool->domain =1086(adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?1087AMDGPU_GEM_DOMAIN_VRAM :1088AMDGPU_GEM_DOMAIN_GTT;10891090switch (pool_size) {1091case SMU_MEMORY_POOL_SIZE_256_MB:1092case SMU_MEMORY_POOL_SIZE_512_MB:1093case SMU_MEMORY_POOL_SIZE_1_GB:1094case SMU_MEMORY_POOL_SIZE_2_GB:1095ret = amdgpu_bo_create_kernel(adev,1096memory_pool->size,1097memory_pool->align,1098memory_pool->domain,1099&memory_pool->bo,1100&memory_pool->mc_address,1101&memory_pool->cpu_addr);1102if (ret)1103dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");1104break;1105default:1106break;1107}11081109return ret;1110}11111112static int smu_free_memory_pool(struct smu_context *smu)1113{1114struct smu_table_context *smu_table = &smu->smu_table;1115struct smu_table *memory_pool = &smu_table->memory_pool;11161117if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)1118return 0;11191120amdgpu_bo_free_kernel(&memory_pool->bo,1121&memory_pool->mc_address,1122&memory_pool->cpu_addr);11231124memset(memory_pool, 0, sizeof(struct smu_table));11251126return 0;1127}11281129static int smu_alloc_dummy_read_table(struct smu_context *smu)1130{1131struct smu_table_context *smu_table = &smu->smu_table;1132struct smu_table *dummy_read_1_table =1133&smu_table->dummy_read_1_table;1134struct amdgpu_device *adev = smu->adev;1135int ret = 0;11361137if (!dummy_read_1_table->size)1138return 0;11391140ret = amdgpu_bo_create_kernel(adev,1141dummy_read_1_table->size,1142dummy_read_1_table->align,1143dummy_read_1_table->domain,1144&dummy_read_1_table->bo,1145&dummy_read_1_table->mc_address,1146&dummy_read_1_table->cpu_addr);1147if (ret)1148dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");11491150return ret;1151}11521153static void smu_free_dummy_read_table(struct smu_context *smu)1154{1155struct smu_table_context *smu_table = &smu->smu_table;1156struct smu_table *dummy_read_1_table =1157&smu_table->dummy_read_1_table;115811591160amdgpu_bo_free_kernel(&dummy_read_1_table->bo,1161&dummy_read_1_table->mc_address,1162&dummy_read_1_table->cpu_addr);11631164memset(dummy_read_1_table, 0, sizeof(struct smu_table));1165}11661167static int smu_smc_table_sw_init(struct smu_context *smu)1168{1169int ret;11701171/**1172* Create smu_table structure, and init smc tables such as1173* TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.1174*/1175ret = smu_init_smc_tables(smu);1176if (ret) {1177dev_err(smu->adev->dev, "Failed to init smc tables!\n");1178return ret;1179}11801181/**1182* Create smu_power_context structure, and allocate smu_dpm_context and1183* context size to fill the smu_power_context data.1184*/1185ret = smu_init_power(smu);1186if (ret) {1187dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");1188return ret;1189}11901191/*1192* allocate vram bos to store smc table contents.1193*/1194ret = smu_init_fb_allocations(smu);1195if (ret)1196return ret;11971198ret = smu_alloc_memory_pool(smu);1199if (ret)1200return ret;12011202ret = smu_alloc_dummy_read_table(smu);1203if (ret)1204return ret;12051206ret = smu_i2c_init(smu);1207if (ret)1208return ret;12091210return 0;1211}12121213static int smu_smc_table_sw_fini(struct smu_context *smu)1214{1215int ret;12161217smu_i2c_fini(smu);12181219smu_free_dummy_read_table(smu);12201221ret = smu_free_memory_pool(smu);1222if (ret)1223return ret;12241225ret = smu_fini_fb_allocations(smu);1226if (ret)1227return ret;12281229ret = smu_fini_power(smu);1230if (ret) {1231dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");1232return ret;1233}12341235ret = smu_fini_smc_tables(smu);1236if (ret) {1237dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");1238return ret;1239}12401241return 0;1242}12431244static void smu_throttling_logging_work_fn(struct work_struct *work)1245{1246struct smu_context *smu = container_of(work, struct smu_context,1247throttling_logging_work);12481249smu_log_thermal_throttling(smu);1250}12511252static void smu_interrupt_work_fn(struct work_struct *work)1253{1254struct smu_context *smu = container_of(work, struct smu_context,1255interrupt_work);12561257if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)1258smu->ppt_funcs->interrupt_work(smu);1259}12601261static void smu_swctf_delayed_work_handler(struct work_struct *work)1262{1263struct smu_context *smu =1264container_of(work, struct smu_context, swctf_delayed_work.work);1265struct smu_temperature_range *range =1266&smu->thermal_range;1267struct amdgpu_device *adev = smu->adev;1268uint32_t hotspot_tmp, size;12691270/*1271* If the hotspot temperature is confirmed as below SW CTF setting point1272* after the delay enforced, nothing will be done.1273* Otherwise, a graceful shutdown will be performed to prevent further damage.1274*/1275if (range->software_shutdown_temp &&1276smu->ppt_funcs->read_sensor &&1277!smu->ppt_funcs->read_sensor(smu,1278AMDGPU_PP_SENSOR_HOTSPOT_TEMP,1279&hotspot_tmp,1280&size) &&1281hotspot_tmp / 1000 < range->software_shutdown_temp)1282return;12831284dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");1285dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");1286orderly_poweroff(true);1287}12881289static void smu_init_xgmi_plpd_mode(struct smu_context *smu)1290{1291struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);1292struct smu_dpm_policy_ctxt *policy_ctxt;1293struct smu_dpm_policy *policy;12941295policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);1296if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {1297if (policy)1298policy->current_level = XGMI_PLPD_DEFAULT;1299return;1300}13011302/* PMFW put PLPD into default policy after enabling the feature */1303if (smu_feature_is_enabled(smu,1304SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {1305if (policy)1306policy->current_level = XGMI_PLPD_DEFAULT;1307} else {1308policy_ctxt = dpm_ctxt->dpm_policies;1309if (policy_ctxt)1310policy_ctxt->policy_mask &=1311~BIT(PP_PM_POLICY_XGMI_PLPD);1312}1313}13141315static void smu_init_power_profile(struct smu_context *smu)1316{1317if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)1318smu->power_profile_mode =1319PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;1320smu_power_profile_mode_get(smu, smu->power_profile_mode);1321}13221323void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)1324{1325struct smu_feature_cap *fea_cap = &smu->fea_cap;13261327if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)1328return;13291330set_bit(fea_id, fea_cap->cap_map);1331}13321333bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)1334{1335struct smu_feature_cap *fea_cap = &smu->fea_cap;13361337if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)1338return false;13391340return test_bit(fea_id, fea_cap->cap_map);1341}13421343static void smu_feature_cap_init(struct smu_context *smu)1344{1345struct smu_feature_cap *fea_cap = &smu->fea_cap;13461347bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);1348}13491350static int smu_sw_init(struct amdgpu_ip_block *ip_block)1351{1352struct amdgpu_device *adev = ip_block->adev;1353struct smu_context *smu = adev->powerplay.pp_handle;1354int i, ret;13551356smu->pool_size = adev->pm.smu_prv_buffer_size;1357smu_feature_init(smu, SMU_FEATURE_MAX);13581359INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);1360INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);1361atomic64_set(&smu->throttle_int_counter, 0);1362smu->watermarks_bitmap = 0;13631364for (i = 0; i < adev->vcn.num_vcn_inst; i++)1365atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);1366atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);1367atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);1368atomic_set(&smu->smu_power.power_gate.isp_gated, 1);1369atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);13701371smu_init_power_profile(smu);1372smu->display_config = &adev->pm.pm_display_cfg;13731374smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;1375smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;13761377INIT_DELAYED_WORK(&smu->swctf_delayed_work,1378smu_swctf_delayed_work_handler);13791380smu_feature_cap_init(smu);13811382ret = smu_smc_table_sw_init(smu);1383if (ret) {1384dev_err(adev->dev, "Failed to sw init smc table!\n");1385return ret;1386}13871388/* get boot_values from vbios to set revision, gfxclk, and etc. */1389ret = smu_get_vbios_bootup_values(smu);1390if (ret) {1391dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");1392return ret;1393}13941395ret = smu_init_pptable_microcode(smu);1396if (ret) {1397dev_err(adev->dev, "Failed to setup pptable firmware!\n");1398return ret;1399}14001401ret = smu_register_irq_handler(smu);1402if (ret) {1403dev_err(adev->dev, "Failed to register smc irq handler!\n");1404return ret;1405}14061407/* If there is no way to query fan control mode, fan control is not supported */1408if (!smu->ppt_funcs->get_fan_control_mode)1409smu->adev->pm.no_fan = true;14101411return 0;1412}14131414static int smu_sw_fini(struct amdgpu_ip_block *ip_block)1415{1416struct amdgpu_device *adev = ip_block->adev;1417struct smu_context *smu = adev->powerplay.pp_handle;1418int ret;14191420ret = smu_smc_table_sw_fini(smu);1421if (ret) {1422dev_err(adev->dev, "Failed to sw fini smc table!\n");1423return ret;1424}14251426if (smu->custom_profile_params) {1427kfree(smu->custom_profile_params);1428smu->custom_profile_params = NULL;1429}14301431smu_fini_microcode(smu);14321433return 0;1434}14351436static int smu_get_thermal_temperature_range(struct smu_context *smu)1437{1438struct amdgpu_device *adev = smu->adev;1439struct smu_temperature_range *range =1440&smu->thermal_range;1441int ret = 0;14421443if (!smu->ppt_funcs->get_thermal_temperature_range)1444return 0;14451446ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);1447if (ret)1448return ret;14491450adev->pm.dpm.thermal.min_temp = range->min;1451adev->pm.dpm.thermal.max_temp = range->max;1452adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;1453adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;1454adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;1455adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;1456adev->pm.dpm.thermal.min_mem_temp = range->mem_min;1457adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;1458adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;14591460return ret;1461}14621463/**1464* smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges1465*1466* @smu: smu_context pointer1467*1468* Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.1469* Returns 0 on success, error on failure.1470*/1471static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)1472{1473struct wbrf_ranges_in_out wbrf_exclusion = {0};1474struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;1475struct amdgpu_device *adev = smu->adev;1476uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;1477uint64_t start, end;1478int ret, i, j;14791480ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);1481if (ret) {1482dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");1483return ret;1484}14851486/*1487* The exclusion ranges array we got might be filled with holes and duplicate1488* entries. For example:1489* {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}1490* We need to do some sortups to eliminate those holes and duplicate entries.1491* Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}1492*/1493for (i = 0; i < num_of_wbrf_ranges; i++) {1494start = wifi_bands[i].start;1495end = wifi_bands[i].end;14961497/* get the last valid entry to fill the intermediate hole */1498if (!start && !end) {1499for (j = num_of_wbrf_ranges - 1; j > i; j--)1500if (wifi_bands[j].start && wifi_bands[j].end)1501break;15021503/* no valid entry left */1504if (j <= i)1505break;15061507start = wifi_bands[i].start = wifi_bands[j].start;1508end = wifi_bands[i].end = wifi_bands[j].end;1509wifi_bands[j].start = 0;1510wifi_bands[j].end = 0;1511num_of_wbrf_ranges = j;1512}15131514/* eliminate duplicate entries */1515for (j = i + 1; j < num_of_wbrf_ranges; j++) {1516if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {1517wifi_bands[j].start = 0;1518wifi_bands[j].end = 0;1519}1520}1521}15221523/* Send the sorted wifi_bands to PMFW */1524ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);1525/* Try to set the wifi_bands again */1526if (unlikely(ret == -EBUSY)) {1527mdelay(5);1528ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);1529}15301531return ret;1532}15331534/**1535* smu_wbrf_event_handler - handle notify events1536*1537* @nb: notifier block1538* @action: event type1539* @_arg: event data1540*1541* Calls relevant amdgpu function in response to wbrf event1542* notification from kernel.1543*/1544static int smu_wbrf_event_handler(struct notifier_block *nb,1545unsigned long action, void *_arg)1546{1547struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);15481549switch (action) {1550case WBRF_CHANGED:1551schedule_delayed_work(&smu->wbrf_delayed_work,1552msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));1553break;1554default:1555return NOTIFY_DONE;1556}15571558return NOTIFY_OK;1559}15601561/**1562* smu_wbrf_delayed_work_handler - callback on delayed work timer expired1563*1564* @work: struct work_struct pointer1565*1566* Flood is over and driver will consume the latest exclusion ranges.1567*/1568static void smu_wbrf_delayed_work_handler(struct work_struct *work)1569{1570struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);15711572smu_wbrf_handle_exclusion_ranges(smu);1573}15741575/**1576* smu_wbrf_support_check - check wbrf support1577*1578* @smu: smu_context pointer1579*1580* Verifies the ACPI interface whether wbrf is supported.1581*/1582static void smu_wbrf_support_check(struct smu_context *smu)1583{1584struct amdgpu_device *adev = smu->adev;15851586smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&1587acpi_amd_wbrf_supported_consumer(adev->dev);15881589if (smu->wbrf_supported)1590dev_info(adev->dev, "RF interference mitigation is supported\n");1591}15921593/**1594* smu_wbrf_init - init driver wbrf support1595*1596* @smu: smu_context pointer1597*1598* Verifies the AMD ACPI interfaces and registers with the wbrf1599* notifier chain if wbrf feature is supported.1600* Returns 0 on success, error on failure.1601*/1602static int smu_wbrf_init(struct smu_context *smu)1603{1604int ret;16051606if (!smu->wbrf_supported)1607return 0;16081609INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);16101611smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;1612ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);1613if (ret)1614return ret;16151616/*1617* Some wifiband exclusion ranges may be already there1618* before our driver loaded. To make sure our driver1619* is awared of those exclusion ranges.1620*/1621schedule_delayed_work(&smu->wbrf_delayed_work,1622msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));16231624return 0;1625}16261627/**1628* smu_wbrf_fini - tear down driver wbrf support1629*1630* @smu: smu_context pointer1631*1632* Unregisters with the wbrf notifier chain.1633*/1634static void smu_wbrf_fini(struct smu_context *smu)1635{1636if (!smu->wbrf_supported)1637return;16381639amd_wbrf_unregister_notifier(&smu->wbrf_notifier);16401641cancel_delayed_work_sync(&smu->wbrf_delayed_work);1642}16431644static int smu_smc_hw_setup(struct smu_context *smu)1645{1646struct amdgpu_device *adev = smu->adev;1647uint8_t pcie_gen = 0, pcie_width = 0;1648uint64_t features_supported;1649int ret = 0;16501651switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {1652case IP_VERSION(11, 0, 7):1653case IP_VERSION(11, 0, 11):1654case IP_VERSION(11, 5, 0):1655case IP_VERSION(11, 5, 2):1656case IP_VERSION(11, 0, 12):1657if (adev->in_suspend && smu_is_dpm_running(smu)) {1658dev_info(adev->dev, "dpm has been enabled\n");1659ret = smu_system_features_control(smu, true);1660if (ret) {1661dev_err(adev->dev, "Failed system features control!\n");1662return ret;1663}16641665return smu_enable_thermal_alert(smu);1666}1667break;1668default:1669break;1670}16711672ret = smu_init_display_count(smu, 0);1673if (ret) {1674dev_info(adev->dev, "Failed to pre-set display count as 0!\n");1675return ret;1676}16771678ret = smu_set_driver_table_location(smu);1679if (ret) {1680dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");1681return ret;1682}16831684/*1685* Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.1686*/1687ret = smu_set_tool_table_location(smu);1688if (ret) {1689dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");1690return ret;1691}16921693/*1694* Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify1695* pool location.1696*/1697ret = smu_notify_memory_pool_location(smu);1698if (ret) {1699dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");1700return ret;1701}17021703/*1704* It is assumed the pptable used before runpm is same as1705* the one used afterwards. Thus, we can reuse the stored1706* copy and do not need to resetup the pptable again.1707*/1708if (!adev->in_runpm) {1709ret = smu_setup_pptable(smu);1710if (ret) {1711dev_err(adev->dev, "Failed to setup pptable!\n");1712return ret;1713}1714}17151716/* smu_dump_pptable(smu); */17171718/*1719* With SCPM enabled, PSP is responsible for the PPTable transferring1720* (to SMU). Driver involvement is not needed and permitted.1721*/1722if (!adev->scpm_enabled) {1723/*1724* Copy pptable bo in the vram to smc with SMU MSGs such as1725* SetDriverDramAddr and TransferTableDram2Smu.1726*/1727ret = smu_write_pptable(smu);1728if (ret) {1729dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");1730return ret;1731}1732}17331734/* issue Run*Btc msg */1735ret = smu_run_btc(smu);1736if (ret)1737return ret;17381739/* Enable UclkShadow on wbrf supported */1740if (smu->wbrf_supported) {1741ret = smu_enable_uclk_shadow(smu, true);1742if (ret) {1743dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");1744return ret;1745}1746}17471748/*1749* With SCPM enabled, these actions(and relevant messages) are1750* not needed and permitted.1751*/1752if (!adev->scpm_enabled) {1753ret = smu_feature_set_allowed_mask(smu);1754if (ret) {1755dev_err(adev->dev, "Failed to set driver allowed features mask!\n");1756return ret;1757}1758}17591760if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)1761pcie_gen = 4;1762else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)1763pcie_gen = 3;1764else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)1765pcie_gen = 2;1766else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)1767pcie_gen = 1;1768else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)1769pcie_gen = 0;17701771/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM11772* Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN41773* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x321774*/1775if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)1776pcie_width = 7;1777else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)1778pcie_width = 6;1779else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)1780pcie_width = 5;1781else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)1782pcie_width = 4;1783else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)1784pcie_width = 3;1785else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)1786pcie_width = 2;1787else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)1788pcie_width = 1;1789ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);1790if (ret) {1791dev_err(adev->dev, "Attempt to override pcie params failed!\n");1792return ret;1793}17941795ret = smu_system_features_control(smu, true);1796if (ret) {1797dev_err(adev->dev, "Failed to enable requested dpm features!\n");1798return ret;1799}18001801smu_init_xgmi_plpd_mode(smu);18021803ret = smu_feature_get_enabled_mask(smu, &features_supported);1804if (ret) {1805dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");1806return ret;1807}1808smu_feature_list_set_bits(smu, SMU_FEATURE_LIST_SUPPORTED,1809(unsigned long *)&features_supported);18101811if (!smu_is_dpm_running(smu))1812dev_info(adev->dev, "dpm has been disabled\n");18131814/*1815* Set initialized values (get from vbios) to dpm tables context such as1816* gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each1817* type of clks.1818*/1819ret = smu_set_default_dpm_table(smu);1820if (ret) {1821dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");1822return ret;1823}18241825ret = smu_get_thermal_temperature_range(smu);1826if (ret) {1827dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");1828return ret;1829}18301831ret = smu_enable_thermal_alert(smu);1832if (ret) {1833dev_err(adev->dev, "Failed to enable thermal alert!\n");1834return ret;1835}18361837ret = smu_notify_display_change(smu);1838if (ret) {1839dev_err(adev->dev, "Failed to notify display change!\n");1840return ret;1841}18421843/*1844* Set min deep sleep dce fclk with bootup value from vbios via1845* SetMinDeepSleepDcefclk MSG.1846*/1847ret = smu_set_min_dcef_deep_sleep(smu,1848smu->smu_table.boot_values.dcefclk / 100);1849if (ret) {1850dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");1851return ret;1852}18531854/* Init wbrf support. Properly setup the notifier */1855ret = smu_wbrf_init(smu);1856if (ret)1857dev_err(adev->dev, "Error during wbrf init call\n");18581859return ret;1860}18611862static int smu_start_smc_engine(struct smu_context *smu)1863{1864struct amdgpu_device *adev = smu->adev;1865int ret = 0;18661867if (amdgpu_virt_xgmi_migrate_enabled(adev))1868smu_update_gpu_addresses(smu);18691870smu->smc_fw_state = SMU_FW_INIT;18711872if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {1873if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {1874if (smu->ppt_funcs->load_microcode) {1875ret = smu->ppt_funcs->load_microcode(smu);1876if (ret)1877return ret;1878}1879}1880}18811882if (smu->ppt_funcs->check_fw_status) {1883ret = smu->ppt_funcs->check_fw_status(smu);1884if (ret) {1885dev_err(adev->dev, "SMC is not ready\n");1886return ret;1887}1888}18891890/*1891* Send msg GetDriverIfVersion to check if the return value is equal1892* with DRIVER_IF_VERSION of smc header.1893*/1894ret = smu_check_fw_version(smu);1895if (ret)1896return ret;18971898return ret;1899}19001901static int smu_hw_init(struct amdgpu_ip_block *ip_block)1902{1903int i, ret;1904struct amdgpu_device *adev = ip_block->adev;1905struct smu_context *smu = adev->powerplay.pp_handle;19061907if (amdgpu_sriov_multi_vf_mode(adev)) {1908smu->pm_enabled = false;1909return 0;1910}19111912ret = smu_start_smc_engine(smu);1913if (ret) {1914dev_err(adev->dev, "SMC engine is not correctly up!\n");1915return ret;1916}19171918/*1919* Check whether wbrf is supported. This needs to be done1920* before SMU setup starts since part of SMU configuration1921* relies on this.1922*/1923smu_wbrf_support_check(smu);19241925if (smu->is_apu) {1926ret = smu_set_gfx_imu_enable(smu);1927if (ret)1928return ret;1929for (i = 0; i < adev->vcn.num_vcn_inst; i++)1930smu_dpm_set_vcn_enable(smu, true, i);1931smu_dpm_set_jpeg_enable(smu, true);1932smu_dpm_set_umsch_mm_enable(smu, true);1933smu_set_mall_enable(smu);1934smu_set_gfx_cgpg(smu, true);1935}19361937if (!smu->pm_enabled)1938return 0;19391940ret = smu_init_driver_allowed_feature_mask(smu);1941if (ret)1942return ret;19431944ret = smu_smc_hw_setup(smu);1945if (ret) {1946dev_err(adev->dev, "Failed to setup smc hw!\n");1947return ret;1948}19491950/*1951* Move maximum sustainable clock retrieving here considering1952* 1. It is not needed on resume(from S3).1953* 2. DAL settings come between .hw_init and .late_init of SMU.1954* And DAL needs to know the maximum sustainable clocks. Thus1955* it cannot be put in .late_init().1956*/1957ret = smu_init_max_sustainable_clocks(smu);1958if (ret) {1959dev_err(adev->dev, "Failed to init max sustainable clocks!\n");1960return ret;1961}19621963adev->pm.dpm_enabled = true;19641965dev_info(adev->dev, "SMU is initialized successfully!\n");19661967return 0;1968}19691970static int smu_disable_dpms(struct smu_context *smu)1971{1972struct amdgpu_device *adev = smu->adev;1973int ret = 0;1974bool use_baco = !smu->is_apu &&1975((amdgpu_in_reset(adev) &&1976(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||1977((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));19781979/*1980* For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others)1981* properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.1982*/1983switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {1984case IP_VERSION(13, 0, 0):1985case IP_VERSION(13, 0, 7):1986case IP_VERSION(13, 0, 10):1987case IP_VERSION(14, 0, 2):1988case IP_VERSION(14, 0, 3):1989return 0;1990default:1991break;1992}19931994/*1995* For custom pptable uploading, skip the DPM features1996* disable process on Navi1x ASICs.1997* - As the gfx related features are under control of1998* RLC on those ASICs. RLC reinitialization will be1999* needed to reenable them. That will cost much more2000* efforts.2001*2002* - SMU firmware can handle the DPM reenablement2003* properly.2004*/2005if (smu->uploading_custom_pp_table) {2006switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {2007case IP_VERSION(11, 0, 0):2008case IP_VERSION(11, 0, 5):2009case IP_VERSION(11, 0, 9):2010case IP_VERSION(11, 0, 7):2011case IP_VERSION(11, 0, 11):2012case IP_VERSION(11, 5, 0):2013case IP_VERSION(11, 5, 2):2014case IP_VERSION(11, 0, 12):2015case IP_VERSION(11, 0, 13):2016return 0;2017default:2018break;2019}2020}20212022/*2023* For Sienna_Cichlid, PMFW will handle the features disablement properly2024* on BACO in. Driver involvement is unnecessary.2025*/2026if (use_baco) {2027switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {2028case IP_VERSION(11, 0, 7):2029case IP_VERSION(11, 0, 0):2030case IP_VERSION(11, 0, 5):2031case IP_VERSION(11, 0, 9):2032case IP_VERSION(13, 0, 7):2033return 0;2034default:2035break;2036}2037}20382039/*2040* For GFX11 and subsequent APUs, PMFW will handle the features disablement properly2041* for gpu reset and S0i3 cases. Driver involvement is unnecessary.2042*/2043if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&2044smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))2045return 0;20462047/* vangogh s0ix */2048if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||2049amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&2050adev->in_s0ix)2051return 0;20522053/*2054* For gpu reset, runpm and hibernation through BACO,2055* BACO feature has to be kept enabled.2056*/2057if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {2058ret = smu_disable_all_features_with_exception(smu,2059SMU_FEATURE_BACO_BIT);2060if (ret)2061dev_err(adev->dev, "Failed to disable smu features except BACO.\n");2062} else {2063/* DisableAllSmuFeatures message is not permitted with SCPM enabled */2064if (!adev->scpm_enabled) {2065ret = smu_system_features_control(smu, false);2066if (ret)2067dev_err(adev->dev, "Failed to disable smu features.\n");2068}2069}20702071/* Notify SMU RLC is going to be off, stop RLC and SMU interaction.2072* otherwise SMU will hang while interacting with RLC if RLC is halted2073* this is a WA for Vangogh asic which fix the SMU hang issue.2074*/2075ret = smu_notify_rlc_state(smu, false);2076if (ret) {2077dev_err(adev->dev, "Fail to notify rlc status!\n");2078return ret;2079}20802081if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&2082!((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&2083!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)2084adev->gfx.rlc.funcs->stop(adev);20852086return ret;2087}20882089static int smu_smc_hw_cleanup(struct smu_context *smu)2090{2091struct amdgpu_device *adev = smu->adev;2092int ret = 0;20932094smu_wbrf_fini(smu);20952096cancel_work_sync(&smu->throttling_logging_work);2097cancel_work_sync(&smu->interrupt_work);20982099ret = smu_disable_thermal_alert(smu);2100if (ret) {2101dev_err(adev->dev, "Fail to disable thermal alert!\n");2102return ret;2103}21042105cancel_delayed_work_sync(&smu->swctf_delayed_work);21062107ret = smu_disable_dpms(smu);2108if (ret) {2109dev_err(adev->dev, "Fail to disable dpm features!\n");2110return ret;2111}21122113return 0;2114}21152116static int smu_reset_mp1_state(struct smu_context *smu)2117{2118struct amdgpu_device *adev = smu->adev;2119int ret = 0;21202121if ((!adev->in_runpm) && (!adev->in_suspend) &&2122(!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==2123IP_VERSION(13, 0, 10) &&2124!amdgpu_device_has_display_hardware(adev))2125ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);21262127return ret;2128}21292130static int smu_hw_fini(struct amdgpu_ip_block *ip_block)2131{2132struct amdgpu_device *adev = ip_block->adev;2133struct smu_context *smu = adev->powerplay.pp_handle;2134int i, ret;21352136if (amdgpu_sriov_multi_vf_mode(adev))2137return 0;21382139for (i = 0; i < adev->vcn.num_vcn_inst; i++) {2140smu_dpm_set_vcn_enable(smu, false, i);2141adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;2142}2143smu_dpm_set_jpeg_enable(smu, false);2144adev->jpeg.cur_state = AMD_PG_STATE_GATE;2145smu_dpm_set_umsch_mm_enable(smu, false);21462147if (!smu->pm_enabled)2148return 0;21492150adev->pm.dpm_enabled = false;21512152ret = smu_smc_hw_cleanup(smu);2153if (ret)2154return ret;21552156ret = smu_reset_mp1_state(smu);2157if (ret)2158return ret;21592160return 0;2161}21622163static void smu_late_fini(struct amdgpu_ip_block *ip_block)2164{2165struct amdgpu_device *adev = ip_block->adev;2166struct smu_context *smu = adev->powerplay.pp_handle;21672168kfree(smu);2169}21702171static int smu_reset(struct smu_context *smu)2172{2173struct amdgpu_device *adev = smu->adev;2174struct amdgpu_ip_block *ip_block;2175int ret;21762177ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);2178if (!ip_block)2179return -EINVAL;21802181ret = smu_hw_fini(ip_block);2182if (ret)2183return ret;21842185ret = smu_hw_init(ip_block);2186if (ret)2187return ret;21882189ret = smu_late_init(ip_block);2190if (ret)2191return ret;21922193return 0;2194}21952196static int smu_suspend(struct amdgpu_ip_block *ip_block)2197{2198struct amdgpu_device *adev = ip_block->adev;2199struct smu_context *smu = adev->powerplay.pp_handle;2200int ret;2201uint64_t count;22022203if (amdgpu_sriov_multi_vf_mode(adev))2204return 0;22052206if (!smu->pm_enabled)2207return 0;22082209adev->pm.dpm_enabled = false;22102211ret = smu_smc_hw_cleanup(smu);2212if (ret)2213return ret;22142215smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);22162217smu_set_gfx_cgpg(smu, false);22182219/*2220* pwfw resets entrycount when device is suspended, so we save the2221* last value to be used when we resume to keep it consistent2222*/2223ret = smu_get_entrycount_gfxoff(smu, &count);2224if (!ret)2225adev->gfx.gfx_off_entrycount = count;22262227/* clear this on suspend so it will get reprogrammed on resume */2228smu->workload_mask = 0;22292230return 0;2231}22322233static int smu_resume(struct amdgpu_ip_block *ip_block)2234{2235int ret;2236struct amdgpu_device *adev = ip_block->adev;2237struct smu_context *smu = adev->powerplay.pp_handle;22382239if (amdgpu_sriov_multi_vf_mode(adev))2240return 0;22412242if (!smu->pm_enabled)2243return 0;22442245dev_info(adev->dev, "SMU is resuming...\n");22462247ret = smu_start_smc_engine(smu);2248if (ret) {2249dev_err(adev->dev, "SMC engine is not correctly up!\n");2250return ret;2251}22522253ret = smu_smc_hw_setup(smu);2254if (ret) {2255dev_err(adev->dev, "Failed to setup smc hw!\n");2256return ret;2257}22582259ret = smu_set_gfx_imu_enable(smu);2260if (ret)2261return ret;22622263smu_set_gfx_cgpg(smu, true);22642265smu->disable_uclk_switch = 0;22662267adev->pm.dpm_enabled = true;22682269dev_info(adev->dev, "SMU is resumed successfully!\n");22702271return 0;2272}22732274static int smu_display_configuration_change(void *handle,2275const struct amd_pp_display_configuration *display_config)2276{2277struct smu_context *smu = handle;22782279if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2280return -EOPNOTSUPP;22812282if (!display_config)2283return -EINVAL;22842285smu_set_min_dcef_deep_sleep(smu,2286display_config->min_dcef_deep_sleep_set_clk / 100);22872288return 0;2289}22902291static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,2292enum amd_clockgating_state state)2293{2294return 0;2295}22962297static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,2298enum amd_powergating_state state)2299{2300return 0;2301}23022303static int smu_enable_umd_pstate(void *handle,2304enum amd_dpm_forced_level *level)2305{2306uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |2307AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |2308AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |2309AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;23102311struct smu_context *smu = (struct smu_context*)(handle);2312struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);23132314if (!smu->is_apu && !smu_dpm_ctx->dpm_context)2315return -EINVAL;23162317if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {2318/* enter umd pstate, save current level, disable gfx cg*/2319if (*level & profile_mode_mask) {2320smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;2321smu_gpo_control(smu, false);2322smu_gfx_ulv_control(smu, false);2323smu_deep_sleep_control(smu, false);2324amdgpu_asic_update_umd_stable_pstate(smu->adev, true);2325}2326} else {2327/* exit umd pstate, restore level, enable gfx cg*/2328if (!(*level & profile_mode_mask)) {2329if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)2330*level = smu_dpm_ctx->saved_dpm_level;2331amdgpu_asic_update_umd_stable_pstate(smu->adev, false);2332smu_deep_sleep_control(smu, true);2333smu_gfx_ulv_control(smu, true);2334smu_gpo_control(smu, true);2335}2336}23372338return 0;2339}23402341static int smu_bump_power_profile_mode(struct smu_context *smu,2342long *custom_params,2343u32 custom_params_max_idx)2344{2345u32 workload_mask = 0;2346int i, ret = 0;23472348for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {2349if (smu->workload_refcount[i])2350workload_mask |= 1 << i;2351}23522353if (smu->workload_mask == workload_mask)2354return 0;23552356if (smu->ppt_funcs->set_power_profile_mode)2357ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,2358custom_params,2359custom_params_max_idx);23602361if (!ret)2362smu->workload_mask = workload_mask;23632364return ret;2365}23662367static void smu_power_profile_mode_get(struct smu_context *smu,2368enum PP_SMC_POWER_PROFILE profile_mode)2369{2370smu->workload_refcount[profile_mode]++;2371}23722373static void smu_power_profile_mode_put(struct smu_context *smu,2374enum PP_SMC_POWER_PROFILE profile_mode)2375{2376if (smu->workload_refcount[profile_mode])2377smu->workload_refcount[profile_mode]--;2378}23792380static int smu_adjust_power_state_dynamic(struct smu_context *smu,2381enum amd_dpm_forced_level level,2382bool skip_display_settings)2383{2384int ret = 0;2385struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);23862387if (!skip_display_settings) {2388ret = smu_display_config_changed(smu);2389if (ret) {2390dev_err(smu->adev->dev, "Failed to change display config!");2391return ret;2392}2393}23942395ret = smu_apply_clocks_adjust_rules(smu);2396if (ret) {2397dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");2398return ret;2399}24002401if (!skip_display_settings) {2402ret = smu_notify_smc_display_config(smu);2403if (ret) {2404dev_err(smu->adev->dev, "Failed to notify smc display config!");2405return ret;2406}2407}24082409if (smu_dpm_ctx->dpm_level != level) {2410ret = smu_asic_set_performance_level(smu, level);2411if (ret) {2412if (ret == -EOPNOTSUPP)2413dev_info(smu->adev->dev, "set performance level %d not supported",2414level);2415else2416dev_err(smu->adev->dev, "Failed to set performance level %d",2417level);2418return ret;2419}24202421/* update the saved copy */2422smu_dpm_ctx->dpm_level = level;2423}24242425if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&2426smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)2427smu_bump_power_profile_mode(smu, NULL, 0);24282429return ret;2430}24312432static int smu_handle_task(struct smu_context *smu,2433enum amd_dpm_forced_level level,2434enum amd_pp_task task_id)2435{2436int ret = 0;24372438if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2439return -EOPNOTSUPP;24402441switch (task_id) {2442case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:2443ret = smu_pre_display_config_changed(smu);2444if (ret)2445return ret;2446ret = smu_adjust_power_state_dynamic(smu, level, false);2447break;2448case AMD_PP_TASK_COMPLETE_INIT:2449ret = smu_adjust_power_state_dynamic(smu, level, true);2450break;2451case AMD_PP_TASK_READJUST_POWER_STATE:2452ret = smu_adjust_power_state_dynamic(smu, level, true);2453break;2454default:2455break;2456}24572458return ret;2459}24602461static int smu_handle_dpm_task(void *handle,2462enum amd_pp_task task_id,2463enum amd_pm_state_type *user_state)2464{2465struct smu_context *smu = handle;2466struct smu_dpm_context *smu_dpm = &smu->smu_dpm;24672468return smu_handle_task(smu, smu_dpm->dpm_level, task_id);24692470}24712472static int smu_switch_power_profile(void *handle,2473enum PP_SMC_POWER_PROFILE type,2474bool enable)2475{2476struct smu_context *smu = handle;2477struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);2478int ret;24792480if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2481return -EOPNOTSUPP;24822483if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))2484return -EINVAL;24852486if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&2487smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {2488if (enable)2489smu_power_profile_mode_get(smu, type);2490else2491smu_power_profile_mode_put(smu, type);2492/* don't switch the active workload when paused */2493if (smu->pause_workload)2494ret = 0;2495else2496ret = smu_bump_power_profile_mode(smu, NULL, 0);2497if (ret) {2498if (enable)2499smu_power_profile_mode_put(smu, type);2500else2501smu_power_profile_mode_get(smu, type);2502return ret;2503}2504}25052506return 0;2507}25082509static int smu_pause_power_profile(void *handle,2510bool pause)2511{2512struct smu_context *smu = handle;2513struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);2514u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;2515int ret;25162517if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2518return -EOPNOTSUPP;25192520if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&2521smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {2522smu->pause_workload = pause;25232524/* force to bootup default profile */2525if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)2526ret = smu->ppt_funcs->set_power_profile_mode(smu,2527workload_mask,2528NULL,25290);2530else2531ret = smu_bump_power_profile_mode(smu, NULL, 0);2532return ret;2533}25342535return 0;2536}25372538static enum amd_dpm_forced_level smu_get_performance_level(void *handle)2539{2540struct smu_context *smu = handle;2541struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);25422543if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2544return -EOPNOTSUPP;25452546if (!smu->is_apu && !smu_dpm_ctx->dpm_context)2547return -EINVAL;25482549return smu_dpm_ctx->dpm_level;2550}25512552static int smu_force_performance_level(void *handle,2553enum amd_dpm_forced_level level)2554{2555struct smu_context *smu = handle;2556struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);2557int ret = 0;25582559if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2560return -EOPNOTSUPP;25612562if (!smu->is_apu && !smu_dpm_ctx->dpm_context)2563return -EINVAL;25642565ret = smu_enable_umd_pstate(smu, &level);2566if (ret)2567return ret;25682569ret = smu_handle_task(smu, level,2570AMD_PP_TASK_READJUST_POWER_STATE);25712572/* reset user dpm clock state */2573if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {2574memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));2575smu->user_dpm_profile.clk_dependency = 0;2576}25772578return ret;2579}25802581static int smu_set_display_count(void *handle, uint32_t count)2582{2583struct smu_context *smu = handle;25842585if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2586return -EOPNOTSUPP;25872588return smu_init_display_count(smu, count);2589}25902591static int smu_force_smuclk_levels(struct smu_context *smu,2592enum smu_clk_type clk_type,2593uint32_t mask)2594{2595struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);2596int ret = 0;25972598if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2599return -EOPNOTSUPP;26002601if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {2602dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");2603return -EINVAL;2604}26052606if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {2607ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);2608if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {2609smu->user_dpm_profile.clk_mask[clk_type] = mask;2610smu_set_user_clk_dependencies(smu, clk_type);2611}2612}26132614return ret;2615}26162617static int smu_force_ppclk_levels(void *handle,2618enum pp_clock_type type,2619uint32_t mask)2620{2621struct smu_context *smu = handle;2622enum smu_clk_type clk_type;26232624switch (type) {2625case PP_SCLK:2626clk_type = SMU_SCLK; break;2627case PP_MCLK:2628clk_type = SMU_MCLK; break;2629case PP_PCIE:2630clk_type = SMU_PCIE; break;2631case PP_SOCCLK:2632clk_type = SMU_SOCCLK; break;2633case PP_FCLK:2634clk_type = SMU_FCLK; break;2635case PP_DCEFCLK:2636clk_type = SMU_DCEFCLK; break;2637case PP_VCLK:2638clk_type = SMU_VCLK; break;2639case PP_VCLK1:2640clk_type = SMU_VCLK1; break;2641case PP_DCLK:2642clk_type = SMU_DCLK; break;2643case PP_DCLK1:2644clk_type = SMU_DCLK1; break;2645case OD_SCLK:2646clk_type = SMU_OD_SCLK; break;2647case OD_MCLK:2648clk_type = SMU_OD_MCLK; break;2649case OD_VDDC_CURVE:2650clk_type = SMU_OD_VDDC_CURVE; break;2651case OD_RANGE:2652clk_type = SMU_OD_RANGE; break;2653default:2654return -EINVAL;2655}26562657return smu_force_smuclk_levels(smu, clk_type, mask);2658}26592660/*2661* On system suspending or resetting, the dpm_enabled2662* flag will be cleared. So that those SMU services which2663* are not supported will be gated.2664* However, the mp1 state setting should still be granted2665* even if the dpm_enabled cleared.2666*/2667static int smu_set_mp1_state(void *handle,2668enum pp_mp1_state mp1_state)2669{2670struct smu_context *smu = handle;2671int ret = 0;26722673if (!smu->pm_enabled)2674return -EOPNOTSUPP;26752676if (smu->ppt_funcs &&2677smu->ppt_funcs->set_mp1_state)2678ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);26792680return ret;2681}26822683static int smu_set_df_cstate(void *handle,2684enum pp_df_cstate state)2685{2686struct smu_context *smu = handle;2687int ret = 0;26882689if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2690return -EOPNOTSUPP;26912692if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)2693return 0;26942695ret = smu->ppt_funcs->set_df_cstate(smu, state);2696if (ret)2697dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");26982699return ret;2700}27012702int smu_write_watermarks_table(struct smu_context *smu)2703{2704if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2705return -EOPNOTSUPP;27062707return smu_set_watermarks_table(smu, NULL);2708}27092710static int smu_set_watermarks_for_clock_ranges(void *handle,2711struct pp_smu_wm_range_sets *clock_ranges)2712{2713struct smu_context *smu = handle;27142715if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2716return -EOPNOTSUPP;27172718if (smu->disable_watermark)2719return 0;27202721return smu_set_watermarks_table(smu, clock_ranges);2722}27232724int smu_set_ac_dc(struct smu_context *smu)2725{2726int ret = 0;27272728if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2729return -EOPNOTSUPP;27302731/* controlled by firmware */2732if (smu->dc_controlled_by_gpio)2733return 0;27342735ret = smu_set_power_source(smu,2736smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :2737SMU_POWER_SOURCE_DC);2738if (ret)2739dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",2740smu->adev->pm.ac_power ? "AC" : "DC");27412742return ret;2743}27442745const struct amd_ip_funcs smu_ip_funcs = {2746.name = "smu",2747.early_init = smu_early_init,2748.late_init = smu_late_init,2749.sw_init = smu_sw_init,2750.sw_fini = smu_sw_fini,2751.hw_init = smu_hw_init,2752.hw_fini = smu_hw_fini,2753.late_fini = smu_late_fini,2754.suspend = smu_suspend,2755.resume = smu_resume,2756.is_idle = NULL,2757.check_soft_reset = NULL,2758.wait_for_idle = NULL,2759.soft_reset = NULL,2760.set_clockgating_state = smu_set_clockgating_state,2761.set_powergating_state = smu_set_powergating_state,2762};27632764const struct amdgpu_ip_block_version smu_v11_0_ip_block = {2765.type = AMD_IP_BLOCK_TYPE_SMC,2766.major = 11,2767.minor = 0,2768.rev = 0,2769.funcs = &smu_ip_funcs,2770};27712772const struct amdgpu_ip_block_version smu_v12_0_ip_block = {2773.type = AMD_IP_BLOCK_TYPE_SMC,2774.major = 12,2775.minor = 0,2776.rev = 0,2777.funcs = &smu_ip_funcs,2778};27792780const struct amdgpu_ip_block_version smu_v13_0_ip_block = {2781.type = AMD_IP_BLOCK_TYPE_SMC,2782.major = 13,2783.minor = 0,2784.rev = 0,2785.funcs = &smu_ip_funcs,2786};27872788const struct amdgpu_ip_block_version smu_v14_0_ip_block = {2789.type = AMD_IP_BLOCK_TYPE_SMC,2790.major = 14,2791.minor = 0,2792.rev = 0,2793.funcs = &smu_ip_funcs,2794};27952796const struct amdgpu_ip_block_version smu_v15_0_ip_block = {2797.type = AMD_IP_BLOCK_TYPE_SMC,2798.major = 15,2799.minor = 0,2800.rev = 0,2801.funcs = &smu_ip_funcs,2802};28032804const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle)2805{2806struct smu_context *smu = (struct smu_context *)handle;2807const struct ras_smu_drv *tmp = NULL;2808int ret;28092810ret = smu_get_ras_smu_drv(smu, &tmp);28112812return ret ? NULL : tmp;2813}28142815static int smu_load_microcode(void *handle)2816{2817struct smu_context *smu = handle;2818struct amdgpu_device *adev = smu->adev;2819int ret = 0;28202821if (!smu->pm_enabled)2822return -EOPNOTSUPP;28232824/* This should be used for non PSP loading */2825if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)2826return 0;28272828if (smu->ppt_funcs->load_microcode) {2829ret = smu->ppt_funcs->load_microcode(smu);2830if (ret) {2831dev_err(adev->dev, "Load microcode failed\n");2832return ret;2833}2834}28352836if (smu->ppt_funcs->check_fw_status) {2837ret = smu->ppt_funcs->check_fw_status(smu);2838if (ret) {2839dev_err(adev->dev, "SMC is not ready\n");2840return ret;2841}2842}28432844return ret;2845}28462847static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)2848{2849int ret = 0;28502851if (smu->ppt_funcs->set_gfx_cgpg)2852ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);28532854return ret;2855}28562857static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)2858{2859struct smu_context *smu = handle;2860int ret = 0;28612862if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2863return -EOPNOTSUPP;28642865if (!smu->ppt_funcs->set_fan_speed_rpm)2866return -EOPNOTSUPP;28672868if (speed == U32_MAX)2869return -EINVAL;28702871ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);2872if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {2873smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;2874smu->user_dpm_profile.fan_speed_rpm = speed;28752876/* Override custom PWM setting as they cannot co-exist */2877smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;2878smu->user_dpm_profile.fan_speed_pwm = 0;2879}28802881return ret;2882}28832884/**2885* smu_get_power_limit - Request one of the SMU Power Limits2886*2887* @handle: pointer to smu context2888* @limit: requested limit is written back to this variable2889* @pp_limit_level: &pp_power_limit_level which limit of the power to return2890* @pp_power_type: &pp_power_type type of power2891* Return: 0 on success, <0 on error2892*2893*/2894int smu_get_power_limit(void *handle,2895uint32_t *limit,2896enum pp_power_limit_level pp_limit_level,2897enum pp_power_type pp_power_type)2898{2899struct smu_context *smu = handle;2900struct amdgpu_device *adev = smu->adev;2901enum smu_ppt_limit_level limit_level;2902uint32_t limit_type;2903int ret = 0;29042905if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2906return -EOPNOTSUPP;29072908if (!limit)2909return -EINVAL;29102911switch (pp_power_type) {2912case PP_PWR_TYPE_SUSTAINED:2913limit_type = SMU_DEFAULT_PPT_LIMIT;2914break;2915case PP_PWR_TYPE_FAST:2916limit_type = SMU_FAST_PPT_LIMIT;2917break;2918default:2919return -EOPNOTSUPP;2920}29212922switch (pp_limit_level) {2923case PP_PWR_LIMIT_CURRENT:2924limit_level = SMU_PPT_LIMIT_CURRENT;2925break;2926case PP_PWR_LIMIT_DEFAULT:2927limit_level = SMU_PPT_LIMIT_DEFAULT;2928break;2929case PP_PWR_LIMIT_MAX:2930limit_level = SMU_PPT_LIMIT_MAX;2931break;2932case PP_PWR_LIMIT_MIN:2933limit_level = SMU_PPT_LIMIT_MIN;2934break;2935default:2936return -EOPNOTSUPP;2937}29382939if (limit_type != SMU_DEFAULT_PPT_LIMIT) {2940if (smu->ppt_funcs->get_ppt_limit)2941ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);2942else2943return -EOPNOTSUPP;2944} else {2945switch (limit_level) {2946case SMU_PPT_LIMIT_CURRENT:2947switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {2948case IP_VERSION(13, 0, 2):2949case IP_VERSION(13, 0, 6):2950case IP_VERSION(13, 0, 12):2951case IP_VERSION(13, 0, 14):2952case IP_VERSION(11, 0, 7):2953case IP_VERSION(11, 0, 11):2954case IP_VERSION(11, 0, 12):2955case IP_VERSION(11, 0, 13):2956ret = smu_get_asic_power_limits(smu,2957&smu->current_power_limit,2958NULL, NULL, NULL);2959break;2960default:2961break;2962}2963*limit = smu->current_power_limit;2964break;2965case SMU_PPT_LIMIT_DEFAULT:2966*limit = smu->default_power_limit;2967break;2968case SMU_PPT_LIMIT_MAX:2969*limit = smu->max_power_limit;2970break;2971case SMU_PPT_LIMIT_MIN:2972*limit = smu->min_power_limit;2973break;2974default:2975return -EINVAL;2976}2977}29782979return ret;2980}29812982static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)2983{2984struct smu_context *smu = handle;2985int ret = 0;29862987if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)2988return -EOPNOTSUPP;29892990if (limit_type == SMU_DEFAULT_PPT_LIMIT) {2991if (!limit)2992limit = smu->current_power_limit;2993if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {2994dev_err(smu->adev->dev,2995"New power limit (%d) is out of range [%d,%d]\n",2996limit, smu->min_power_limit, smu->max_power_limit);2997return -EINVAL;2998}2999}30003001if (smu->ppt_funcs->set_power_limit) {3002ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);3003if (ret)3004return ret;3005if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))3006smu->user_dpm_profile.power_limits[limit_type] = limit;3007}30083009return 0;3010}30113012static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)3013{3014enum smu_clk_type clk_type;30153016switch (type) {3017case PP_SCLK:3018clk_type = SMU_SCLK; break;3019case PP_MCLK:3020clk_type = SMU_MCLK; break;3021case PP_PCIE:3022clk_type = SMU_PCIE; break;3023case PP_SOCCLK:3024clk_type = SMU_SOCCLK; break;3025case PP_FCLK:3026clk_type = SMU_FCLK; break;3027case PP_DCEFCLK:3028clk_type = SMU_DCEFCLK; break;3029case PP_VCLK:3030clk_type = SMU_VCLK; break;3031case PP_VCLK1:3032clk_type = SMU_VCLK1; break;3033case PP_DCLK:3034clk_type = SMU_DCLK; break;3035case PP_DCLK1:3036clk_type = SMU_DCLK1; break;3037case PP_ISPICLK:3038clk_type = SMU_ISPICLK;3039break;3040case PP_ISPXCLK:3041clk_type = SMU_ISPXCLK;3042break;3043case OD_SCLK:3044clk_type = SMU_OD_SCLK; break;3045case OD_MCLK:3046clk_type = SMU_OD_MCLK; break;3047case OD_VDDC_CURVE:3048clk_type = SMU_OD_VDDC_CURVE; break;3049case OD_RANGE:3050clk_type = SMU_OD_RANGE; break;3051case OD_VDDGFX_OFFSET:3052clk_type = SMU_OD_VDDGFX_OFFSET; break;3053case OD_CCLK:3054clk_type = SMU_OD_CCLK; break;3055case OD_FAN_CURVE:3056clk_type = SMU_OD_FAN_CURVE; break;3057case OD_ACOUSTIC_LIMIT:3058clk_type = SMU_OD_ACOUSTIC_LIMIT; break;3059case OD_ACOUSTIC_TARGET:3060clk_type = SMU_OD_ACOUSTIC_TARGET; break;3061case OD_FAN_TARGET_TEMPERATURE:3062clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;3063case OD_FAN_MINIMUM_PWM:3064clk_type = SMU_OD_FAN_MINIMUM_PWM; break;3065case OD_FAN_ZERO_RPM_ENABLE:3066clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;3067case OD_FAN_ZERO_RPM_STOP_TEMP:3068clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;3069default:3070clk_type = SMU_CLK_COUNT; break;3071}30723073return clk_type;3074}30753076static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)3077{3078struct smu_context *smu = handle;3079enum smu_clk_type clk_type;30803081clk_type = smu_convert_to_smuclk(type);3082if (clk_type == SMU_CLK_COUNT)3083return -EINVAL;30843085if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3086return -EOPNOTSUPP;30873088if (!smu->ppt_funcs->emit_clk_levels)3089return -ENOENT;30903091return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);30923093}30943095static int smu_od_edit_dpm_table(void *handle,3096enum PP_OD_DPM_TABLE_COMMAND type,3097long *input, uint32_t size)3098{3099struct smu_context *smu = handle;3100int ret = 0;31013102if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3103return -EOPNOTSUPP;31043105if (smu->ppt_funcs->od_edit_dpm_table) {3106ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);3107}31083109return ret;3110}31113112static int smu_read_sensor(void *handle,3113int sensor,3114void *data,3115int *size_arg)3116{3117struct smu_context *smu = handle;3118struct amdgpu_device *adev = smu->adev;3119struct smu_umd_pstate_table *pstate_table =3120&smu->pstate_table;3121int i, ret = 0;3122uint32_t *size, size_val;31233124if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3125return -EOPNOTSUPP;31263127if (!data || !size_arg)3128return -EINVAL;31293130size_val = *size_arg;3131size = &size_val;31323133if (smu->ppt_funcs->read_sensor)3134if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))3135goto unlock;31363137switch (sensor) {3138case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:3139*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;3140*size = 4;3141break;3142case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:3143*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;3144*size = 4;3145break;3146case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:3147*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;3148*size = 4;3149break;3150case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:3151*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;3152*size = 4;3153break;3154case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:3155ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);3156*size = 8;3157break;3158case AMDGPU_PP_SENSOR_UVD_POWER:3159*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;3160*size = 4;3161break;3162case AMDGPU_PP_SENSOR_VCE_POWER:3163*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;3164*size = 4;3165break;3166case AMDGPU_PP_SENSOR_VCN_POWER_STATE:3167*(uint32_t *)data = 0;3168for (i = 0; i < adev->vcn.num_vcn_inst; i++) {3169if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {3170*(uint32_t *)data = 1;3171break;3172}3173}3174*size = 4;3175break;3176case AMDGPU_PP_SENSOR_MIN_FAN_RPM:3177*(uint32_t *)data = 0;3178*size = 4;3179break;3180default:3181*size = 0;3182ret = -EOPNOTSUPP;3183break;3184}31853186unlock:3187// assign uint32_t to int3188*size_arg = size_val;31893190return ret;3191}31923193static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)3194{3195int ret = -EOPNOTSUPP;3196struct smu_context *smu = handle;31973198if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)3199ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);32003201return ret;3202}32033204static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)3205{3206int ret = -EOPNOTSUPP;3207struct smu_context *smu = handle;32083209if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)3210ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);32113212return ret;3213}32143215static int smu_get_power_profile_mode(void *handle, char *buf)3216{3217struct smu_context *smu = handle;32183219if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||3220!smu->ppt_funcs->get_power_profile_mode)3221return -EOPNOTSUPP;3222if (!buf)3223return -EINVAL;32243225return smu->ppt_funcs->get_power_profile_mode(smu, buf);3226}32273228static int smu_set_power_profile_mode(void *handle,3229long *param,3230uint32_t param_size)3231{3232struct smu_context *smu = handle;3233bool custom = false;3234int ret = 0;32353236if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||3237!smu->ppt_funcs->set_power_profile_mode)3238return -EOPNOTSUPP;32393240if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {3241custom = true;3242/* clear frontend mask so custom changes propogate */3243smu->workload_mask = 0;3244}32453246if ((param[param_size] != smu->power_profile_mode) || custom) {3247/* clear the old user preference */3248smu_power_profile_mode_put(smu, smu->power_profile_mode);3249/* set the new user preference */3250smu_power_profile_mode_get(smu, param[param_size]);3251ret = smu_bump_power_profile_mode(smu,3252custom ? param : NULL,3253custom ? param_size : 0);3254if (ret)3255smu_power_profile_mode_put(smu, param[param_size]);3256else3257/* store the user's preference */3258smu->power_profile_mode = param[param_size];3259}32603261return ret;3262}32633264static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)3265{3266struct smu_context *smu = handle;32673268if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3269return -EOPNOTSUPP;32703271if (!smu->ppt_funcs->get_fan_control_mode)3272return -EOPNOTSUPP;32733274if (!fan_mode)3275return -EINVAL;32763277*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);32783279return 0;3280}32813282static int smu_set_fan_control_mode(void *handle, u32 value)3283{3284struct smu_context *smu = handle;3285int ret = 0;32863287if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3288return -EOPNOTSUPP;32893290if (!smu->ppt_funcs->set_fan_control_mode)3291return -EOPNOTSUPP;32923293if (value == U32_MAX)3294return -EINVAL;32953296ret = smu->ppt_funcs->set_fan_control_mode(smu, value);3297if (ret)3298goto out;32993300if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {3301smu->user_dpm_profile.fan_mode = value;33023303/* reset user dpm fan speed */3304if (value != AMD_FAN_CTRL_MANUAL) {3305smu->user_dpm_profile.fan_speed_pwm = 0;3306smu->user_dpm_profile.fan_speed_rpm = 0;3307smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);3308}3309}33103311out:3312return ret;3313}33143315static int smu_get_fan_speed_pwm(void *handle, u32 *speed)3316{3317struct smu_context *smu = handle;3318int ret = 0;33193320if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3321return -EOPNOTSUPP;33223323if (!smu->ppt_funcs->get_fan_speed_pwm)3324return -EOPNOTSUPP;33253326if (!speed)3327return -EINVAL;33283329ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);33303331return ret;3332}33333334static int smu_set_fan_speed_pwm(void *handle, u32 speed)3335{3336struct smu_context *smu = handle;3337int ret = 0;33383339if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3340return -EOPNOTSUPP;33413342if (!smu->ppt_funcs->set_fan_speed_pwm)3343return -EOPNOTSUPP;33443345if (speed == U32_MAX)3346return -EINVAL;33473348ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);3349if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {3350smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;3351smu->user_dpm_profile.fan_speed_pwm = speed;33523353/* Override custom RPM setting as they cannot co-exist */3354smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;3355smu->user_dpm_profile.fan_speed_rpm = 0;3356}33573358return ret;3359}33603361static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)3362{3363struct smu_context *smu = handle;3364int ret = 0;33653366if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3367return -EOPNOTSUPP;33683369if (!smu->ppt_funcs->get_fan_speed_rpm)3370return -EOPNOTSUPP;33713372if (!speed)3373return -EINVAL;33743375ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);33763377return ret;3378}33793380static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)3381{3382struct smu_context *smu = handle;33833384if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3385return -EOPNOTSUPP;33863387return smu_set_min_dcef_deep_sleep(smu, clk);3388}33893390static int smu_get_clock_by_type_with_latency(void *handle,3391enum amd_pp_clock_type type,3392struct pp_clock_levels_with_latency *clocks)3393{3394struct smu_context *smu = handle;3395enum smu_clk_type clk_type;3396int ret = 0;33973398if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3399return -EOPNOTSUPP;34003401if (smu->ppt_funcs->get_clock_by_type_with_latency) {3402switch (type) {3403case amd_pp_sys_clock:3404clk_type = SMU_GFXCLK;3405break;3406case amd_pp_mem_clock:3407clk_type = SMU_MCLK;3408break;3409case amd_pp_dcef_clock:3410clk_type = SMU_DCEFCLK;3411break;3412case amd_pp_disp_clock:3413clk_type = SMU_DISPCLK;3414break;3415default:3416dev_err(smu->adev->dev, "Invalid clock type!\n");3417return -EINVAL;3418}34193420ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);3421}34223423return ret;3424}34253426static int smu_display_clock_voltage_request(void *handle,3427struct pp_display_clock_request *clock_req)3428{3429struct smu_context *smu = handle;3430int ret = 0;34313432if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3433return -EOPNOTSUPP;34343435if (smu->ppt_funcs->display_clock_voltage_request)3436ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);34373438return ret;3439}344034413442static int smu_display_disable_memory_clock_switch(void *handle,3443bool disable_memory_clock_switch)3444{3445struct smu_context *smu = handle;3446int ret = -EINVAL;34473448if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3449return -EOPNOTSUPP;34503451if (smu->ppt_funcs->display_disable_memory_clock_switch)3452ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);34533454return ret;3455}34563457static int smu_set_xgmi_pstate(void *handle,3458uint32_t pstate)3459{3460struct smu_context *smu = handle;3461int ret = 0;34623463if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3464return -EOPNOTSUPP;34653466if (smu->ppt_funcs->set_xgmi_pstate)3467ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);34683469if (ret)3470dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");34713472return ret;3473}34743475static int smu_get_baco_capability(void *handle)3476{3477struct smu_context *smu = handle;34783479if (!smu->pm_enabled)3480return false;34813482if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)3483return false;34843485return smu->ppt_funcs->get_bamaco_support(smu);3486}34873488static int smu_baco_set_state(void *handle, int state)3489{3490struct smu_context *smu = handle;3491int ret = 0;34923493if (!smu->pm_enabled)3494return -EOPNOTSUPP;34953496if (state == 0) {3497if (smu->ppt_funcs->baco_exit)3498ret = smu->ppt_funcs->baco_exit(smu);3499} else if (state == 1) {3500if (smu->ppt_funcs->baco_enter)3501ret = smu->ppt_funcs->baco_enter(smu);3502} else {3503return -EINVAL;3504}35053506if (ret)3507dev_err(smu->adev->dev, "Failed to %s BACO state!\n",3508(state)?"enter":"exit");35093510return ret;3511}35123513bool smu_mode1_reset_is_support(struct smu_context *smu)3514{3515bool ret = false;35163517if (!smu->pm_enabled)3518return false;35193520if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)3521ret = smu->ppt_funcs->mode1_reset_is_support(smu);35223523return ret;3524}35253526bool smu_link_reset_is_support(struct smu_context *smu)3527{3528if (!smu->pm_enabled)3529return false;35303531return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET);3532}35333534int smu_mode1_reset(struct smu_context *smu)3535{3536int ret = 0;35373538if (!smu->pm_enabled)3539return -EOPNOTSUPP;35403541if (smu->ppt_funcs->mode1_reset)3542ret = smu->ppt_funcs->mode1_reset(smu);35433544return ret;3545}35463547static int smu_mode2_reset(void *handle)3548{3549struct smu_context *smu = handle;3550int ret = 0;35513552if (!smu->pm_enabled)3553return -EOPNOTSUPP;35543555if (smu->ppt_funcs->mode2_reset)3556ret = smu->ppt_funcs->mode2_reset(smu);35573558if (ret)3559dev_err(smu->adev->dev, "Mode2 reset failed!\n");35603561return ret;3562}35633564int smu_link_reset(struct smu_context *smu)3565{3566int ret = 0;35673568if (!smu->pm_enabled)3569return -EOPNOTSUPP;35703571if (smu->ppt_funcs->link_reset)3572ret = smu->ppt_funcs->link_reset(smu);35733574return ret;3575}35763577static int smu_enable_gfx_features(void *handle)3578{3579struct smu_context *smu = handle;3580int ret = 0;35813582if (!smu->pm_enabled)3583return -EOPNOTSUPP;35843585if (smu->ppt_funcs->enable_gfx_features)3586ret = smu->ppt_funcs->enable_gfx_features(smu);35873588if (ret)3589dev_err(smu->adev->dev, "enable gfx features failed!\n");35903591return ret;3592}35933594static int smu_get_max_sustainable_clocks_by_dc(void *handle,3595struct pp_smu_nv_clock_table *max_clocks)3596{3597struct smu_context *smu = handle;3598int ret = 0;35993600if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3601return -EOPNOTSUPP;36023603if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)3604ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);36053606return ret;3607}36083609static int smu_get_uclk_dpm_states(void *handle,3610unsigned int *clock_values_in_khz,3611unsigned int *num_states)3612{3613struct smu_context *smu = handle;3614int ret = 0;36153616if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3617return -EOPNOTSUPP;36183619if (smu->ppt_funcs->get_uclk_dpm_states)3620ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);36213622return ret;3623}36243625static enum amd_pm_state_type smu_get_current_power_state(void *handle)3626{3627struct smu_context *smu = handle;3628enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;36293630if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3631return -EOPNOTSUPP;36323633if (smu->ppt_funcs->get_current_power_state)3634pm_state = smu->ppt_funcs->get_current_power_state(smu);36353636return pm_state;3637}36383639static int smu_get_dpm_clock_table(void *handle,3640struct dpm_clocks *clock_table)3641{3642struct smu_context *smu = handle;3643int ret = 0;36443645if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3646return -EOPNOTSUPP;36473648if (smu->ppt_funcs->get_dpm_clock_table)3649ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);36503651return ret;3652}36533654static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)3655{3656struct smu_context *smu = handle;3657struct smu_table_context *smu_table = &smu->smu_table;3658struct smu_driver_table *driver_tables = smu_table->driver_tables;3659struct smu_driver_table *gpu_metrics_table;36603661if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3662return -EOPNOTSUPP;36633664if (!smu->ppt_funcs->get_gpu_metrics)3665return -EOPNOTSUPP;36663667gpu_metrics_table = &driver_tables[SMU_DRIVER_TABLE_GPU_METRICS];36683669/* If cached table is valid, return it */3670if (smu_driver_table_is_valid(gpu_metrics_table)) {3671*table = gpu_metrics_table->cache.buffer;3672return gpu_metrics_table->cache.size;3673}36743675return smu->ppt_funcs->get_gpu_metrics(smu, table);3676}36773678static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,3679size_t size)3680{3681struct smu_context *smu = handle;36823683if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3684return -EOPNOTSUPP;36853686if (!smu->ppt_funcs->get_pm_metrics)3687return -EOPNOTSUPP;36883689return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);3690}36913692static int smu_enable_mgpu_fan_boost(void *handle)3693{3694struct smu_context *smu = handle;3695int ret = 0;36963697if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3698return -EOPNOTSUPP;36993700if (smu->ppt_funcs->enable_mgpu_fan_boost)3701ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);37023703return ret;3704}37053706static int smu_gfx_state_change_set(void *handle,3707uint32_t state)3708{3709struct smu_context *smu = handle;3710int ret = 0;37113712if (smu->ppt_funcs->gfx_state_change_set)3713ret = smu->ppt_funcs->gfx_state_change_set(smu, state);37143715return ret;3716}37173718int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)3719{3720int ret = 0;37213722if (smu->ppt_funcs->smu_handle_passthrough_sbr)3723ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);37243725return ret;3726}37273728int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)3729{3730int ret = -EOPNOTSUPP;37313732if (smu->ppt_funcs &&3733smu->ppt_funcs->get_ecc_info)3734ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);37353736return ret;37373738}37393740static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)3741{3742struct smu_context *smu = handle;3743struct smu_table_context *smu_table = &smu->smu_table;3744struct smu_table *memory_pool = &smu_table->memory_pool;37453746if (!addr || !size)3747return -EINVAL;37483749*addr = NULL;3750*size = 0;3751if (memory_pool->bo) {3752*addr = memory_pool->cpu_addr;3753*size = memory_pool->size;3754}37553756return 0;3757}37583759static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,3760size_t *size)3761{3762size_t offset = *size;3763int level;37643765for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {3766if (level == policy->current_level)3767offset += sysfs_emit_at(sysbuf, offset,3768"%d : %s*\n", level,3769policy->desc->get_desc(policy, level));3770else3771offset += sysfs_emit_at(sysbuf, offset,3772"%d : %s\n", level,3773policy->desc->get_desc(policy, level));3774}37753776*size = offset;3777}37783779ssize_t smu_get_pm_policy_info(struct smu_context *smu,3780enum pp_pm_policy p_type, char *sysbuf)3781{3782struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;3783struct smu_dpm_policy_ctxt *policy_ctxt;3784struct smu_dpm_policy *dpm_policy;3785size_t offset = 0;37863787policy_ctxt = dpm_ctxt->dpm_policies;3788if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||3789!policy_ctxt->policy_mask)3790return -EOPNOTSUPP;37913792if (p_type == PP_PM_POLICY_NONE)3793return -EINVAL;37943795dpm_policy = smu_get_pm_policy(smu, p_type);3796if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)3797return -ENOENT;37983799if (!sysbuf)3800return -EINVAL;38013802smu_print_dpm_policy(dpm_policy, sysbuf, &offset);38033804return offset;3805}38063807struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,3808enum pp_pm_policy p_type)3809{3810struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;3811struct smu_dpm_policy_ctxt *policy_ctxt;3812int i;38133814policy_ctxt = dpm_ctxt->dpm_policies;3815if (!policy_ctxt)3816return NULL;38173818for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {3819if (policy_ctxt->policies[i].policy_type == p_type)3820return &policy_ctxt->policies[i];3821}38223823return NULL;3824}38253826int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,3827int level)3828{3829struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;3830struct smu_dpm_policy *dpm_policy = NULL;3831struct smu_dpm_policy_ctxt *policy_ctxt;3832int ret = -EOPNOTSUPP;38333834policy_ctxt = dpm_ctxt->dpm_policies;3835if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||3836!policy_ctxt->policy_mask)3837return ret;38383839if (level < 0 || level >= PP_POLICY_MAX_LEVELS)3840return -EINVAL;38413842dpm_policy = smu_get_pm_policy(smu, p_type);38433844if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)3845return ret;38463847if (dpm_policy->current_level == level)3848return 0;38493850ret = dpm_policy->set_policy(smu, level);38513852if (!ret)3853dpm_policy->current_level = level;38543855return ret;3856}38573858static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)3859{3860struct smu_context *smu = handle;3861struct smu_table_context *smu_table = &smu->smu_table;3862struct smu_driver_table *driver_tables = smu_table->driver_tables;3863enum smu_driver_table_id table_id;3864struct smu_driver_table *temp_table;38653866if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3867return -EOPNOTSUPP;38683869if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)3870return -EOPNOTSUPP;38713872table_id = smu_metrics_get_temp_table_id(type);38733874if (table_id == SMU_DRIVER_TABLE_COUNT)3875return -EINVAL;38763877temp_table = &driver_tables[table_id];38783879/* If the request is to get size alone, return the cached table size */3880if (!table && temp_table->cache.size)3881return temp_table->cache.size;38823883if (smu_driver_table_is_valid(temp_table)) {3884memcpy(table, temp_table->cache.buffer, temp_table->cache.size);3885return temp_table->cache.size;3886}38873888return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);3889}38903891static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)3892{3893struct smu_context *smu = handle;3894bool ret = false;38953896if (!smu->pm_enabled)3897return false;38983899if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)3900ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);39013902return ret;3903}39043905static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)3906{3907struct smu_context *smu = handle;39083909if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)3910return -EOPNOTSUPP;39113912if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)3913return -EOPNOTSUPP;39143915return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);3916}39173918static const struct amd_pm_funcs swsmu_pm_funcs = {3919/* export for sysfs */3920.set_fan_control_mode = smu_set_fan_control_mode,3921.get_fan_control_mode = smu_get_fan_control_mode,3922.set_fan_speed_pwm = smu_set_fan_speed_pwm,3923.get_fan_speed_pwm = smu_get_fan_speed_pwm,3924.force_clock_level = smu_force_ppclk_levels,3925.emit_clock_levels = smu_emit_ppclk_levels,3926.force_performance_level = smu_force_performance_level,3927.read_sensor = smu_read_sensor,3928.get_apu_thermal_limit = smu_get_apu_thermal_limit,3929.set_apu_thermal_limit = smu_set_apu_thermal_limit,3930.get_performance_level = smu_get_performance_level,3931.get_current_power_state = smu_get_current_power_state,3932.get_fan_speed_rpm = smu_get_fan_speed_rpm,3933.set_fan_speed_rpm = smu_set_fan_speed_rpm,3934.get_pp_num_states = smu_get_power_num_states,3935.get_pp_table = smu_sys_get_pp_table,3936.set_pp_table = smu_sys_set_pp_table,3937.switch_power_profile = smu_switch_power_profile,3938.pause_power_profile = smu_pause_power_profile,3939/* export to amdgpu */3940.dispatch_tasks = smu_handle_dpm_task,3941.load_firmware = smu_load_microcode,3942.set_powergating_by_smu = smu_dpm_set_power_gate,3943.set_power_limit = smu_set_power_limit,3944.get_power_limit = smu_get_power_limit,3945.get_power_profile_mode = smu_get_power_profile_mode,3946.set_power_profile_mode = smu_set_power_profile_mode,3947.odn_edit_dpm_table = smu_od_edit_dpm_table,3948.set_mp1_state = smu_set_mp1_state,3949.gfx_state_change_set = smu_gfx_state_change_set,3950/* export to DC */3951.get_sclk = smu_get_sclk,3952.get_mclk = smu_get_mclk,3953.display_configuration_change = smu_display_configuration_change,3954.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,3955.display_clock_voltage_request = smu_display_clock_voltage_request,3956.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,3957.set_active_display_count = smu_set_display_count,3958.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,3959.get_asic_baco_capability = smu_get_baco_capability,3960.set_asic_baco_state = smu_baco_set_state,3961.get_ppfeature_status = smu_sys_get_pp_feature_mask,3962.set_ppfeature_status = smu_sys_set_pp_feature_mask,3963.asic_reset_mode_2 = smu_mode2_reset,3964.asic_reset_enable_gfx_features = smu_enable_gfx_features,3965.set_df_cstate = smu_set_df_cstate,3966.set_xgmi_pstate = smu_set_xgmi_pstate,3967.get_gpu_metrics = smu_sys_get_gpu_metrics,3968.get_pm_metrics = smu_sys_get_pm_metrics,3969.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,3970.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,3971.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,3972.get_uclk_dpm_states = smu_get_uclk_dpm_states,3973.get_dpm_clock_table = smu_get_dpm_clock_table,3974.get_smu_prv_buf_details = smu_get_prv_buffer_details,3975.get_xcp_metrics = smu_sys_get_xcp_metrics,3976.get_temp_metrics = smu_sys_get_temp_metrics,3977.temp_metrics_is_supported = smu_temp_metrics_is_supported,3978};39793980int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,3981uint64_t event_arg)3982{3983int ret = -EINVAL;39843985if (smu->ppt_funcs->wait_for_event)3986ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);39873988return ret;3989}39903991int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)3992{39933994if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)3995return -EOPNOTSUPP;39963997/* Confirm the buffer allocated is of correct size */3998if (size != smu->stb_context.stb_buf_size)3999return -EINVAL;40004001/*4002* No need to lock smu mutex as we access STB directly through MMIO4003* and not going through SMU messaging route (for now at least).4004* For registers access rely on implementation internal locking.4005*/4006return smu->ppt_funcs->stb_collect_info(smu, buf, size);4007}40084009#if defined(CONFIG_DEBUG_FS)40104011static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)4012{4013struct amdgpu_device *adev = filp->f_inode->i_private;4014struct smu_context *smu = adev->powerplay.pp_handle;4015unsigned char *buf;4016int r;40174018buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);4019if (!buf)4020return -ENOMEM;40214022r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);4023if (r)4024goto out;40254026filp->private_data = buf;40274028return 0;40294030out:4031kvfree(buf);4032return r;4033}40344035static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,4036loff_t *pos)4037{4038struct amdgpu_device *adev = filp->f_inode->i_private;4039struct smu_context *smu = adev->powerplay.pp_handle;404040414042if (!filp->private_data)4043return -EINVAL;40444045return simple_read_from_buffer(buf,4046size,4047pos, filp->private_data,4048smu->stb_context.stb_buf_size);4049}40504051static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)4052{4053kvfree(filp->private_data);4054filp->private_data = NULL;40554056return 0;4057}40584059/*4060* We have to define not only read method but also4061* open and release because .read takes up to PAGE_SIZE4062* data each time so and so is invoked multiple times.4063* We allocate the STB buffer in .open and release it4064* in .release4065*/4066static const struct file_operations smu_stb_debugfs_fops = {4067.owner = THIS_MODULE,4068.open = smu_stb_debugfs_open,4069.read = smu_stb_debugfs_read,4070.release = smu_stb_debugfs_release,4071.llseek = default_llseek,4072};40734074#endif40754076void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)4077{4078#if defined(CONFIG_DEBUG_FS)40794080struct smu_context *smu = adev->powerplay.pp_handle;40814082if (!smu || (!smu->stb_context.stb_buf_size))4083return;40844085debugfs_create_file_size("amdgpu_smu_stb_dump",4086S_IRUSR,4087adev_to_drm(adev)->primary->debugfs_root,4088adev,4089&smu_stb_debugfs_fops,4090smu->stb_context.stb_buf_size);4091#endif4092}40934094int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)4095{4096int ret = 0;40974098if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)4099ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);41004101return ret;4102}41034104int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)4105{4106int ret = 0;41074108if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)4109ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);41104111return ret;4112}41134114int smu_send_rma_reason(struct smu_context *smu)4115{4116int ret = 0;41174118if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)4119ret = smu->ppt_funcs->send_rma_reason(smu);41204121return ret;4122}41234124/**4125* smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU4126* @smu: smu_context pointer4127*4128* This function checks if the SMU supports resetting the SDMA engine.4129* It returns true if supported, false otherwise.4130*/4131bool smu_reset_sdma_is_supported(struct smu_context *smu)4132{4133return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);4134}41354136int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)4137{4138int ret = 0;41394140if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)4141ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);41424143return ret;4144}41454146bool smu_reset_vcn_is_supported(struct smu_context *smu)4147{4148return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET);4149}41504151int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)4152{4153if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)4154smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);41554156return 0;4157}415841594160