Path: blob/master/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...) \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
			     (smu)->ppt_funcs->intf(smu, ##args) : \
			     -ENOTSUPP) : \
			    -EINVAL)
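
/*
 * Illustrative expansion (not a definition from this file): a call such
 * as smu_cmn_call_asic_func(get_enabled_mask, smu, mask) dispatches to
 *
 *	smu->ppt_funcs->get_enabled_mask(smu, mask)
 *
 * when the ASIC provides that callback, evaluates to -ENOTSUPP when the
 * callback is absent, and to -EINVAL when no ppt_funcs table is
 * installed at all.
 */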

#define SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define SMU_MSG_V1_DEFAULT_RATELIMIT_BURST 10

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * when the SMU has exported a unified header file containing these
 * macros, which header file we can just include and use the SMU's
 * macros. At the moment, these error codes are unfortunately defined
 * per-ASIC by the SMU, yet we are one driver for all ASICs.
 */
#define SMU_RESP_NONE		0
#define SMU_RESP_OK		1
#define SMU_RESP_CMD_FAIL	0xFF
#define SMU_RESP_CMD_UNKNOWN	0xFE
#define SMU_RESP_CMD_BAD_PREREQ	0xFD
#define SMU_RESP_BUSY_OTHER	0xFC
#define SMU_RESP_DEBUG_END	0xFB

#define SMU_RESP_UNEXP		(~0U)

static int smu_msg_v1_send_debug_msg(struct smu_msg_ctl *ctl, u32 msg, u32 param)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;

	if (!(ctl->flags & SMU_MSG_CTL_DEBUG_MAILBOX))
		return -EOPNOTSUPP;

	mutex_lock(&ctl->lock);

	WREG32(cfg->debug_param_reg, param);
	WREG32(cfg->debug_msg_reg, msg);
	WREG32(cfg->debug_resp_reg, 0);

	mutex_unlock(&ctl->lock);

	return 0;
}

static int __smu_cmn_send_debug_msg(struct smu_msg_ctl *ctl,
				    u32 msg,
				    u32 param)
{
	if (!ctl->ops || !ctl->ops->send_debug_msg)
		return -EOPNOTSUPP;

	return ctl->ops->send_debug_msg(ctl, msg, param);
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * smu_msg_v1_decode_response() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	return smu_msg_wait_response(&smu->msg_ctl, 0);
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If a PCI bus recovery
 * is in progress, or the destination is a virtual GPU which does not
 * allow this message type, the message is simply dropped and success
 * is also returned. See smu_msg_v1_decode_response() for details of
 * the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * @read_arg, when non-NULL, is written back regardless of the result,
 * to give the caller more information and to eliminate extra
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct smu_msg_ctl *ctl = &smu->msg_ctl;
	struct smu_msg_args args = {
		.msg = msg,
		.args[0] = param,
		.num_args = 1,
		.num_out_args = read_arg ? 1 : 0,
		.flags = 0,
		.timeout = 0,
	};
	int ret;

	ret = ctl->ops->send_msg(ctl, &args);

	if (read_arg)
		*read_arg = args.out_args[0];

	return ret;
}
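
/*
 * Minimal usage sketch for smu_cmn_send_smc_msg_with_param(); the
 * message and argument layout mirror smu_cmn_get_enabled_mask()
 * further down in this file, but the surrounding caller is
 * hypothetical:
 *
 *	uint32_t feature_mask_low;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu,
 *					      SMU_MSG_GetEnabledSmuFeatures,
 *					      0,
 *					      &feature_mask_low);
 *	if (ret)
 *		return ret;
 */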

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(&smu->msg_ctl, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(&smu->msg_ctl, msg, param);
}

static int smu_msg_v1_decode_response(u32 resp)
{
	int res;

	switch (resp) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command. */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU. */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites. */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU. */
		res = -EREMOTEIO;
		break;
	}

	return res;
}
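
/*
 * SMU_RESP_BUSY_OTHER decodes to -EBUSY above, and the SMU expects the
 * client to retry after roughly 10 us. A bounded retry loop in a
 * hypothetical caller could look like:
 *
 *	int ret, tries = 100;
 *
 *	do {
 *		ret = smu_cmn_send_smc_msg(smu, msg, NULL);
 *		if (ret != -EBUSY)
 *			break;
 *		udelay(10);
 *	} while (--tries);
 */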

static u32 __smu_msg_v1_poll_stat(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	u32 timeout = timeout_us ? timeout_us : ctl->default_timeout;
	u32 reg;

	for (; timeout > 0; timeout--) {
		reg = RREG32(cfg->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	return reg;
}

static void __smu_msg_v1_send(struct smu_msg_ctl *ctl, u16 index,
			      struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	int i;

	WREG32(cfg->resp_reg, 0);
	for (i = 0; i < args->num_args; i++)
		WREG32(cfg->arg_regs[i], args->args[i]);
	WREG32(cfg->msg_reg, index);
}

static void __smu_msg_v1_read_out_args(struct smu_msg_ctl *ctl,
				       struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	int i;

	for (i = 0; i < args->num_out_args; i++)
		args->out_args[i] = RREG32(ctl->config.arg_regs[i]);
}

static void __smu_msg_v1_print_err_limited(struct smu_msg_ctl *ctl,
					   struct smu_msg_args *args,
					   char *err_msg)
{
	static DEFINE_RATELIMIT_STATE(_rs,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_BURST);
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;

	if (__ratelimit(&_rs)) {
		u32 in[SMU_MSG_MAX_ARGS];
		int i;

		dev_err(adev->dev, "%s msg_reg: %x resp_reg: %x", err_msg,
			RREG32(ctl->config.msg_reg),
			RREG32(ctl->config.resp_reg));
		if (args->num_args > 0) {
			for (i = 0; i < args->num_args; i++)
				in[i] = RREG32(ctl->config.arg_regs[i]);
			print_hex_dump(KERN_ERR, "in params:", DUMP_PREFIX_NONE,
				       16, 4, in, args->num_args * sizeof(u32),
				       false);
		}
	}
}

static void __smu_msg_v1_print_error(struct smu_msg_ctl *ctl,
				     u32 resp,
				     struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	int index = ctl->message_map[args->msg].map_to;

	switch (resp) {
	case SMU_RESP_NONE:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: No response");
		break;
	case SMU_RESP_OK:
		break;
	case SMU_RESP_CMD_FAIL:
		break;
	case SMU_RESP_CMD_UNKNOWN:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown command");
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		__smu_msg_v1_print_err_limited(
			ctl, args, "SMU: valid command, bad prerequisites");
		break;
	case SMU_RESP_BUSY_OTHER:
		if (args->msg != SMU_MSG_GetBadPageCount)
			__smu_msg_v1_print_err_limited(ctl, args,
						       "SMU: I'm very busy");
		break;
	case SMU_RESP_DEBUG_END:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: Debug Err");
		break;
	case SMU_RESP_UNEXP:
		if (amdgpu_device_bus_status_check(adev)) {
			dev_err(adev->dev,
				"SMU: bus error for message: %s(%d) response:0x%08X ",
				smu_get_message_name(smu, args->msg), index,
				resp);
			if (args->num_args > 0)
				print_hex_dump(KERN_ERR,
					       "in params:", DUMP_PREFIX_NONE,
					       16, 4, args->args,
					       args->num_args * sizeof(u32),
					       false);
		}
		break;
	default:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown response");
		break;
	}
}

static int __smu_msg_v1_ras_filter(struct smu_msg_ctl *ctl,
				   enum smu_message_type msg, u32 msg_flags,
				   bool *skip_pre_poll)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	bool fed_status;
	u32 reg;

	if (!(smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI))
		return 0;

	fed_status = amdgpu_ras_get_fed_status(adev);

	/* Block non-RAS-priority messages during RAS error */
	if (fed_status && !(msg_flags & SMU_MSG_RAS_PRI)) {
		dev_dbg(adev->dev, "RAS error detected, skip sending %s",
			smu_get_message_name(smu, msg));
		return -EACCES;
	}

	/* Skip pre-poll for priority messages or during RAS error */
	if ((msg_flags & SMU_MSG_NO_PRECHECK) || fed_status) {
		reg = RREG32(ctl->config.resp_reg);
		dev_dbg(adev->dev,
			"Sending priority message %s response status: %x",
			smu_get_message_name(smu, msg), reg);
		if (reg == 0)
			*skip_pre_poll = true;
	}

	return 0;
}
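
/*
 * The VF/RAS filters above and the index translation in
 * smu_msg_v1_send_msg() below key off per-message flags in the
 * cmn2asic message map. A sketch of a table entry that remains
 * sendable during a RAS error and skips the pre-poll (field names as
 * used in this file; the ASIC-side PPSMC_MSG_GetBadPageCount index is
 * assumed for illustration):
 *
 *	[SMU_MSG_GetBadPageCount] = {
 *		.valid_mapping	= 1,
 *		.map_to		= PPSMC_MSG_GetBadPageCount,
 *		.flags		= SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK,
 *	},
 */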

/**
 * smu_msg_v1_send_msg - complete V1 protocol with all filtering
 * @ctl: Message control block
 * @args: Message arguments
 *
 * Return: 0 on success, negative errno on failure
 */
static int smu_msg_v1_send_msg(struct smu_msg_ctl *ctl,
			       struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	const struct cmn2asic_msg_mapping *mapping;
	u32 reg, msg_flags;
	int ret, index;
	bool skip_pre_poll = false;
	bool lock_held = args->flags & SMU_MSG_FLAG_LOCK_HELD;

	/* Early exit if no HW access */
	if (adev->no_hw_access)
		return 0;

	/* Message index translation */
	if (args->msg >= SMU_MSG_MAX_COUNT || !ctl->message_map)
		return -EINVAL;

	if (args->num_args > ctl->config.num_arg_regs ||
	    args->num_out_args > ctl->config.num_arg_regs)
		return -EINVAL;

	mapping = &ctl->message_map[args->msg];
	if (!mapping->valid_mapping)
		return -EINVAL;

	msg_flags = mapping->flags;
	index = mapping->map_to;

	/* VF filter - skip messages not valid for VF */
	if (amdgpu_sriov_vf(adev) && !(msg_flags & SMU_MSG_VF_FLAG))
		return 0;

	if (!lock_held)
		mutex_lock(&ctl->lock);

	/* RAS priority filter */
	ret = __smu_msg_v1_ras_filter(ctl, args->msg, msg_flags,
				      &skip_pre_poll);
	if (ret)
		goto out;

	/* FW state checks */
	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev,
			"SMU is in a hung state, failed to send smu message!\n");
		ret = -EREMOTEIO;
		goto out;
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		skip_pre_poll = true;
		smu->smc_fw_state = SMU_FW_RUNTIME;
	}

	/* Pre-poll: ensure previous message completed */
	if (!skip_pre_poll) {
		reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
		ret = smu_msg_v1_decode_response(reg);
		if (reg == SMU_RESP_NONE || ret == -EREMOTEIO) {
			__smu_msg_v1_print_error(ctl, reg, args);
			goto out;
		}
	}

	/* Send message */
	__smu_msg_v1_send(ctl, (u16)index, args);

	/* Post-poll (skip if ASYNC) */
	if (args->flags & SMU_MSG_FLAG_ASYNC) {
		ret = 0;
		goto out;
	}

	reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
	ret = smu_msg_v1_decode_response(reg);

	/* FW state update on fatal error */
	if (ret == -EREMOTEIO) {
		smu->smc_fw_state = SMU_FW_HANG;
		__smu_msg_v1_print_error(ctl, reg, args);
	} else if (ret != 0) {
		__smu_msg_v1_print_error(ctl, reg, args);
	}

	/* Read output args */
	if (ret == 0 && args->num_out_args > 0) {
		__smu_msg_v1_read_out_args(ctl, args);
		dev_dbg(adev->dev, "smu send message: %s(%d), resp: 0x%08x\n",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
		print_hex_dump_debug("out params:", DUMP_PREFIX_NONE, 16, 4,
				     args->out_args,
				     args->num_out_args * sizeof(u32), false);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d), resp: 0x%08x\n",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
	}

out:
	/* Debug halt on error */
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	if (!lock_held)
		mutex_unlock(&ctl->lock);
	return ret;
}

static int smu_msg_v1_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int ret;

	reg = __smu_msg_v1_poll_stat(ctl, timeout_us);
	ret = smu_msg_v1_decode_response(reg);

	if (ret == -EREMOTEIO)
		smu->smc_fw_state = SMU_FW_HANG;

	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret && (ret != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return ret;
}

const struct smu_msg_ops smu_msg_v1_ops = {
	.send_msg = smu_msg_v1_send_msg,
	.wait_response = smu_msg_v1_wait_response,
	.decode_response = smu_msg_v1_decode_response,
	.send_debug_msg = smu_msg_v1_send_debug_msg,
};

int smu_msg_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	return ctl->ops->wait_response(ctl, timeout_us);
}

/**
 * smu_msg_send_async_locked - Send message asynchronously, caller holds lock
 * @ctl: Message control block
 * @msg: Message type
 * @param: Message parameter
 *
 * Send an SMU message without waiting for response. Caller must hold
 * ctl->lock and call smu_msg_wait_response() later to get the result.
 *
 * Return: 0 on success, negative errno on failure
 */
int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,
			      enum smu_message_type msg, u32 param)
{
	struct smu_msg_args args = {
		.msg = msg,
		.args[0] = param,
		.num_args = 1,
		.num_out_args = 0,
		.flags = SMU_MSG_FLAG_ASYNC | SMU_MSG_FLAG_LOCK_HELD,
		.timeout = 0,
	};

	return ctl->ops->send_msg(ctl, &args);
}
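
/*
 * Sketch of the intended split-transaction pattern, per the kernel-doc
 * above (the work done between send and wait is up to the caller):
 *
 *	mutex_lock(&smu->msg_ctl.lock);
 *	ret = smu_msg_send_async_locked(&smu->msg_ctl, msg, param);
 *	if (!ret) {
 *		// ... other work while the SMU executes the message ...
 *		ret = smu_msg_wait_response(&smu->msg_ctl, 0);
 *	}
 *	mutex_unlock(&smu->msg_ctl.lock);
 */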

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->msg_ctl.message_map)
			return -EINVAL;

		msg_mapping = smu->msg_ctl.message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return smu_feature_list_is_set(smu, SMU_FEATURE_LIST_SUPPORTED,
				       feature_id);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. As they also have no feature_map available, the
	 * check here avoids the unwanted feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
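
/*
 * Worked example: with throttler_map[2] == 17, a @dep_status of BIT(2)
 * yields an independent status of BIT_ULL(17); every set dependent bit
 * is translated through @throttler_map this way. The concrete bit
 * positions here are illustrative only.
 */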

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}
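
/*
 * Worked example for the mask arithmetic above: with a current
 * feature_mask of 0b1100 and a new_mask of 0b1010,
 *
 *	feature_2_enabled  = ~0b1100 &  0b1010 = 0b0010
 *	feature_2_disabled =  0b1100 & ~0b1010 = 0b0100
 *
 * so only the bits that actually change state are sent down, and bits
 * that agree in both masks are left untouched.
 */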

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 * @smu:  smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the hdp cache: guarantee that the content seen
		 * by the GPU is consistent with what the CPU wrote.
		 */
		amdgpu_hdp_flush(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_hdp_invalidate(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}
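
/*
 * The transfer parameter built above packs the descriptor into one u32:
 * bits 15:0 carry the ASIC-specific table id and bits 31:16 carry
 * @argument. For example, table_id 0x2 with argument 0x1 is sent as
 * 0x00010002. The concrete values are illustrative only.
 */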

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p);

	pci_dev_put(p);

	return snd_driver_loaded;
}

static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case SOC_PSTATE_DEFAULT:
		return "soc_pstate_default";
	case SOC_PSTATE_0:
		return "soc_pstate_0";
	case SOC_PSTATE_1:
		return "soc_pstate_1";
	case SOC_PSTATE_2:
		return "soc_pstate_2";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc pstate_policy_desc = {
	.name = STR_SOC_PSTATE_POLICY,
	.get_desc = smu_soc_policy_get_desc,
};

void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &pstate_policy_desc;
}

static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
					   int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case XGMI_PLPD_DISALLOW:
		return "plpd_disallow";
	case XGMI_PLPD_DEFAULT:
		return "plpd_default";
	case XGMI_PLPD_OPTIMIZED:
		return "plpd_optimized";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
	.name = STR_XGMI_PLPD_POLICY,
	.get_desc = smu_xgmi_plpd_policy_get_desc,
};

void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &xgmi_plpd_policy_desc;
}

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	int workload_type;
	u32 profile_mode;

	*backend_workload_mask = 0;

	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
		if (!(workload_mask & (1 << profile_mode)))
			continue;

		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       profile_mode);

		if (workload_type < 0)
			continue;

		*backend_workload_mask |= 1 << workload_type;
	}
}
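
/*
 * Worked example: if @workload_mask has the bit for a profile such as
 * PP_SMC_POWER_PROFILE_VIDEO set and the ASIC workload map translates
 * that profile to backend bit 3, then BIT(3) is set in
 * @backend_workload_mask; profiles without a valid mapping are silently
 * skipped. The concrete profile and bit number are illustrative only.
 */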

static inline bool smu_cmn_freqs_match(uint32_t freq1, uint32_t freq2)
{
	/* Frequencies within 25 MHz of each other are considered equal */
	return (abs((int)freq1 - (int)freq2) <= 25);
}

int smu_cmn_print_dpm_clk_levels(struct smu_context *smu,
				 struct smu_dpm_table *dpm_table,
				 uint32_t cur_clk, char *buf, int *offset)
{
	uint32_t min_clk, max_clk, level_index, count;
	uint32_t freq_values[3];
	int size, lvl, i;
	bool is_fine_grained;
	bool is_deep_sleep;
	bool freq_match;

	if (!dpm_table || !buf)
		return -EINVAL;

	level_index = 0;
	size = *offset;
	count = dpm_table->count;
	is_fine_grained = dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED;
	min_clk = SMU_DPM_TABLE_MIN(dpm_table);
	max_clk = SMU_DPM_TABLE_MAX(dpm_table);

	/* Deep sleep - current clock < min_clock/2, TBD: cur_clk = 0 as GFXOFF */
	is_deep_sleep = cur_clk < min_clk / 2;
	if (is_deep_sleep) {
		size += sysfs_emit_at(buf, size, "S: %uMhz *\n", cur_clk);
		level_index = 1;
	}

	if (!is_fine_grained) {
		for (i = 0; i < count; i++) {
			freq_match = !is_deep_sleep &&
				     smu_cmn_freqs_match(
					     cur_clk,
					     dpm_table->dpm_levels[i].value);
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					      level_index + i,
					      dpm_table->dpm_levels[i].value,
					      freq_match ? "*" : "");
		}
	} else {
		count = 2;
		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		if (!is_deep_sleep) {
			if (smu_cmn_freqs_match(cur_clk, min_clk)) {
				lvl = 0;
			} else if (smu_cmn_freqs_match(cur_clk, max_clk)) {
				lvl = 1;
			} else {
				/* NOTE: use index '1' to show the current clock value */
				lvl = 1;
				count = 3;
				freq_values[1] = cur_clk;
				freq_values[2] = max_clk;
			}
		}

		for (i = 0; i < count; i++) {
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", level_index + i,
				freq_values[i],
				(!is_deep_sleep && i == lvl) ? "*" : "");
		}
	}

	*offset = size;

	return 0;
}

int smu_cmn_print_pcie_levels(struct smu_context *smu,
			      struct smu_pcie_table *pcie_table,
			      uint32_t cur_gen, uint32_t cur_lane, char *buf,
			      int *offset)
{
	int size, i;

	if (!pcie_table || !buf)
		return -EINVAL;

	size = *offset;

	for (i = 0; i < pcie_table->lclk_levels; i++) {
		size += sysfs_emit_at(
			buf, size, "%d: %s %s %dMhz %s\n", i,
			(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
			(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
			(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
			(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
			(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," :
			(pcie_table->pcie_gen[i] == 5) ? "64.0GT/s," :
			"",
			(pcie_table->pcie_lane[i] == 1) ? "x1" :
			(pcie_table->pcie_lane[i] == 2) ? "x2" :
			(pcie_table->pcie_lane[i] == 3) ? "x4" :
			(pcie_table->pcie_lane[i] == 4) ? "x8" :
			(pcie_table->pcie_lane[i] == 5) ? "x12" :
			(pcie_table->pcie_lane[i] == 6) ? "x16" :
			(pcie_table->pcie_lane[i] == 7) ? "x32" :
			"",
			pcie_table->lclk_freq[i],
			(cur_gen == pcie_table->pcie_gen[i]) &&
			(cur_lane == pcie_table->pcie_lane[i]) ?
			"*" :
			"");
	}

	*offset = size;

	return 0;
}

int smu_cmn_dpm_pcie_gen_idx(int gen)
{
	int ret;

	switch (gen) {
	case 1 ... 5:
		ret = gen - 1;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

int smu_cmn_dpm_pcie_width_idx(int width)
{
	int ret;

	switch (width) {
	case 1:
		ret = 1;
		break;
	case 2:
		ret = 2;
		break;
	case 4:
		ret = 3;
		break;
	case 8:
		ret = 4;
		break;
	case 12:
		ret = 5;
		break;
	case 16:
		ret = 6;
		break;
	case 32:
		ret = 7;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}