Path: blob/master/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...)                     \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                  \
			     (smu)->ppt_funcs->intf(smu, ##args) :     \
			     -ENOTSUPP) :                              \
			    -EINVAL)

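/*
 * Typical use of the dispatch macro above -- an illustrative sketch
 * only; __smu_get_enabled_features() further down is an in-file
 * caller:
 *
 *	uint64_t mask;
 *	int ret;
 *
 *	ret = smu_cmn_call_asic_func(get_enabled_mask, smu, &mask);
 *
 * A return of -EINVAL means no ppt_funcs table is installed at all,
 * while -ENOTSUPP means the ASIC does not implement this particular
 * hook.
 */
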
static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32(smu->param_reg);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU exports a unified header file containing these
 * macros, which we can then simply include and use. At the moment,
 * these error codes are defined by the SMU per-ASIC, unfortunately,
 * yet we are a single driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

#define SMU_RESP_UNEXP          (~0U)

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The response values are duplicated locally (see the SMU_RESP_*
 * macros above) because, ideally, we would include a single header
 * file defining them, maintained by the SMU FW team, so that we're
 * impervious to firmware changes. At the moment those values are
 * defined in various header files, one for each ASIC, yet here we're
 * a single ASIC-agnostic interface. Such a change can be followed up
 * by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32(smu->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

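/*
 * The raw status from __smu_cmn_poll_stat() is never handed to
 * callers directly; it is translated into a -errno by
 * __smu_cmn_reg2errno() below. A minimal sketch of the pattern used
 * throughout this file:
 *
 *	reg = __smu_cmn_poll_stat(smu);
 *	res = __smu_cmn_reg2errno(smu, reg);
 *
 * where res == -EREMOTEIO is the undefined-response case which the
 * callers below use to mark the firmware as hung (SMU_FW_HANG).
 */
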
static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		msg_idx = RREG32(smu->msg_reg);
		prm     = RREG32(smu->param_reg);
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	case SMU_RESP_UNEXP:
		if (amdgpu_device_bus_status_check(smu->adev)) {
			/* print error immediately if device is off the bus */
			dev_err(adev->dev,
				"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				reg_c2pmsg_90, msg_index, param, message);
			break;
		}
		fallthrough;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->resp_reg, 0);
	WREG32(smu->param_reg, param);
	WREG32(smu->msg_reg, msg);
}

static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
					       enum smu_message_type msg)
{
	return smu->message_map[msg].flags;
}

static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
				    enum smu_message_type msg, bool *poll)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t flags, resp;
	bool fed_status;

	flags = __smu_cmn_get_msg_flags(smu, msg);
	*poll = true;

	/* When there is a RAS fatal error, FW won't process non-RAS priority
	 * messages. Don't allow any messages other than RAS priority messages.
	 */
	fed_status = amdgpu_ras_get_fed_status(adev);
	if (fed_status) {
		if (!(flags & SMU_MSG_RAS_PRI)) {
			dev_dbg(adev->dev,
				"RAS error detected, skip sending %s",
				smu_get_message_name(smu, msg));
			return -EACCES;
		}

		/* FW will ignore non-priority messages when a RAS fatal error
		 * is detected. Hence it is possible that a previous message
		 * did not get a response. For priority messages, continue
		 * without polling for the previous response status.
		 */
		resp = RREG32(smu->resp_reg);
		dev_dbg(adev->dev,
			"Sending RAS priority message %s response status: %x",
			smu_get_message_name(smu, msg), resp);
		if (resp == 0)
			*poll = false;
	}

	return 0;
}

static int __smu_cmn_send_debug_msg(struct smu_context *smu,
				    u32 msg,
				    u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->debug_param_reg, param);
	WREG32(smu->debug_msg_reg, msg);
	WREG32(smu->debug_resp_reg, 0);

	return 0;
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
		res = -EREMOTEIO;
		goto Out;
	}

	if (smu->smc_fw_state == SMU_FW_INIT) {
		smu->smc_fw_state = SMU_FW_RUNTIME;
	} else {
		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);
		if (reg == SMU_RESP_NONE || res == -EREMOTEIO)
			goto Out;
	}

	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}

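/*
 * Sketch of how the non-blocking send is meant to be paired with a
 * later wait (an assumed caller pattern; actual users live in the
 * per-ASIC swsmu code, not in this file):
 *
 *	res = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *	if (res)
 *		return res;
 *	(... other work while the SMU processes the message ...)
 *	res = smu_cmn_wait_for_response(smu);
 */
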
/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (res == -EREMOTEIO)
		smu->smc_fw_state = SMU_FW_HANG;

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If there is a PCI bus
 * recovery or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is
 * also returned. See __smu_cmn_reg2errno() for details of the
 * -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The value at @read_arg is read back regardless, to give back more
 * information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	bool poll = true;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);

	if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
		res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
		if (res)
			goto Out;
	}

	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
		res = -EREMOTEIO;
		goto Out;
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		/* Ignore initial smu response register value */
		poll = false;
		smu->smc_fw_state = SMU_FW_RUNTIME;
	}

	if (poll) {
		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);
		if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
			__smu_cmn_reg_print_error(smu, reg, index, param, msg);
			goto Out;
		}
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0) {
		if (res == -EREMOTEIO)
			smu->smc_fw_state = SMU_FW_HANG;
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	}
	if (read_arg) {
		smu_cmn_read_arg(smu, read_arg);
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg);
	}
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

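/*
 * A typical blocking round trip, reading a value back from the SMU
 * (a sketch; smu_cmn_get_smc_version() below does exactly this):
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
 *	if (ret)
 *		return ret;
 */
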
int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(smu, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(smu, msg, param);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

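/*
 * Example translation through the mapping tables (sketch): resolve
 * the ASIC-specific index of a common message before sending it, as
 * smu_cmn_send_smc_msg_with_param() does internally:
 *
 *	index = smu_cmn_to_asic_specific_index(smu,
 *					       CMN2ASIC_MAPPING_MSG,
 *					       SMU_MSG_GetSmuVersion);
 *	if (index < 0)
 *		return index;
 */
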
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. As they also have no feature_map available, the
	 * check here avoids the unwanted feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

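/*
 * Worked example for the bit translation above, with a hypothetical
 * throttler_map (for illustration only) where dependent bit 2 maps
 * to independent bit 17:
 *
 *	static const uint8_t throttler_map[32] = { [2] = 17 };
 *
 *	smu_cmn_get_indep_throttler_status(BIT(2), throttler_map)
 * then yields BIT_ULL(17).
 */
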
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

/*
 * Example (sketch): disable a single feature by its common mask and
 * let the mapping layer find the ASIC-specific bit:
 *
 *	ret = smu_cmn_feature_set_enabled(smu,
 *					  SMU_FEATURE_DPM_UCLK_BIT,
 *					  false);
 */

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 * @smu: smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, all dpm features
 *        to disable
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush hdp cache: to guarantee the content seen by
		 * the GPU is consistent with that seen by the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}

static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case SOC_PSTATE_DEFAULT:
		return "soc_pstate_default";
	case SOC_PSTATE_0:
		return "soc_pstate_0";
	case SOC_PSTATE_1:
		return "soc_pstate_1";
	case SOC_PSTATE_2:
		return "soc_pstate_2";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc pstate_policy_desc = {
	.name = STR_SOC_PSTATE_POLICY,
	.get_desc = smu_soc_policy_get_desc,
};

void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &pstate_policy_desc;
}

static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
					   int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case XGMI_PLPD_DISALLOW:
		return "plpd_disallow";
	case XGMI_PLPD_DEFAULT:
		return "plpd_default";
	case XGMI_PLPD_OPTIMIZED:
		return "plpd_optimized";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
	.name = STR_XGMI_PLPD_POLICY,
	.get_desc = smu_xgmi_plpd_policy_get_desc,
};

void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &xgmi_plpd_policy_desc;
}

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	int workload_type;
	u32 profile_mode;

	*backend_workload_mask = 0;

	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
		if (!(workload_mask & (1 << profile_mode)))
			continue;

		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       profile_mode);

		if (workload_type < 0)
			continue;

		*backend_workload_mask |= 1 << workload_type;
	}
}