/* Source file: drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h */
/*1* Copyright 2020 Advanced Micro Devices, Inc.2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice shall be included in11* all copies or substantial portions of the Software.12*13* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR14* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,15* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL16* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR17* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,18* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR19* OTHER DEALINGS IN THE SOFTWARE.20*/2122#ifndef __SMU_CMN_H__23#define __SMU_CMN_H__2425#include "amdgpu_smu.h"2627extern const struct smu_msg_ops smu_msg_v1_ops;2829int smu_msg_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us);30int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,31enum smu_message_type msg, u32 param);3233#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)3435#define FDO_PWM_MODE_STATIC 136#define FDO_PWM_MODE_STATIC_RPM 53738#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE39#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x240#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x341#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x442#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x543#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x644#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x745#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x846#define 
SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x94748#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF4950#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \51do { \52typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \53struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \54struct metrics_table_header *header = \55(struct metrics_table_header *)tmp; \56memset(header, 0xFF, sizeof(*tmp)); \57header->format_revision = frev; \58header->content_revision = crev; \59header->structure_size = sizeof(*tmp); \60} while (0)6162#define smu_cmn_init_partition_metrics(ptr, fr, cr) \63do { \64typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \65(ptr)); \66struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \67struct metrics_table_header *header = \68(struct metrics_table_header *)tmp; \69memset(header, 0xFF, sizeof(*tmp)); \70header->format_revision = fr; \71header->content_revision = cr; \72header->structure_size = sizeof(*tmp); \73} while (0)7475#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \76do { \77typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \78(ptr)); \79struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \80struct metrics_table_header *header = \81(struct metrics_table_header *)tmp; \82memset(header, 0xFF, sizeof(*tmp)); \83header->format_revision = fr; \84header->content_revision = cr; \85header->structure_size = sizeof(*tmp); \86} while (0)8788#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \89do { \90typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \91(ptr)); \92struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \93struct metrics_table_header *header = \94(struct metrics_table_header *)tmp; \95memset(header, 0xFF, sizeof(*tmp)); \96header->format_revision = fr; \97header->content_revision = cr; \98header->structure_size = sizeof(*tmp); \99} while (0)100101#define SMU_DPM_PCIE_GEN_IDX(gen) smu_cmn_dpm_pcie_gen_idx((gen))102#define SMU_DPM_PCIE_WIDTH_IDX(width) 
smu_cmn_dpm_pcie_width_idx((width))103104extern const int link_speed[];105106/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */107static inline int pcie_gen_to_speed(uint32_t gen)108{109return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]);110}111112int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,113enum smu_message_type msg,114uint32_t param,115uint32_t *read_arg);116117int smu_cmn_send_smc_msg(struct smu_context *smu,118enum smu_message_type msg,119uint32_t *read_arg);120121int smu_cmn_send_debug_smc_msg(struct smu_context *smu,122uint32_t msg);123124int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,125uint32_t msg, uint32_t param);126127int smu_cmn_wait_for_response(struct smu_context *smu);128129int smu_cmn_to_asic_specific_index(struct smu_context *smu,130enum smu_cmn2asic_mapping_type type,131uint32_t index);132133int smu_cmn_feature_is_supported(struct smu_context *smu,134enum smu_feature_mask mask);135136int smu_cmn_feature_is_enabled(struct smu_context *smu,137enum smu_feature_mask mask);138139bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,140enum smu_clk_type clk_type);141142int smu_cmn_get_enabled_mask(struct smu_context *smu,143uint64_t *feature_mask);144145uint64_t smu_cmn_get_indep_throttler_status(146const unsigned long dep_status,147const uint8_t *throttler_map);148149int smu_cmn_feature_update_enable_state(struct smu_context *smu,150uint64_t feature_mask,151bool enabled);152153int smu_cmn_feature_set_enabled(struct smu_context *smu,154enum smu_feature_mask mask,155bool enable);156157size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,158char *buf);159160int smu_cmn_set_pp_feature_mask(struct smu_context *smu,161uint64_t new_mask);162163int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,164enum smu_feature_mask mask);165166int smu_cmn_get_smc_version(struct smu_context *smu,167uint32_t *if_version,168uint32_t *smu_version);169170int 
smu_cmn_update_table(struct smu_context *smu,171enum smu_table_id table_index,172int argument,173void *table_data,174bool drv2smu);175176int smu_cmn_write_watermarks_table(struct smu_context *smu);177178int smu_cmn_write_pptable(struct smu_context *smu);179180int smu_cmn_get_metrics_table(struct smu_context *smu,181void *metrics_table,182bool bypass_cache);183184int smu_cmn_get_combo_pptable(struct smu_context *smu);185186int smu_cmn_set_mp1_state(struct smu_context *smu,187enum pp_mp1_state mp1_state);188189bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);190void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);191void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);192193void smu_cmn_get_backend_workload_mask(struct smu_context *smu,194u32 workload_mask,195u32 *backend_workload_mask);196197int smu_cmn_print_dpm_clk_levels(struct smu_context *smu,198struct smu_dpm_table *dpm_table,199uint32_t cur_clk,200char *buf, int *offset);201202int smu_cmn_print_pcie_levels(struct smu_context *smu,203struct smu_pcie_table *pcie_table,204uint32_t cur_gen, uint32_t cur_lane,205char *buf, int *offset);206207int smu_cmn_dpm_pcie_gen_idx(int gen);208int smu_cmn_dpm_pcie_width_idx(int width);209210/*SMU gpu metrics */211212/* Attribute ID mapping */213#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X214/* Type ID mapping */215#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X216/* Unit ID mapping */217#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X218219/* Map TYPEID to C type */220#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID221222#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8223#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8224#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16225#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16226#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32227#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32228#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64229#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64230231/* struct members */232#define 
SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \233u64 NAME##_ftype; \234SMU_CTYPE(TYPEID) NAME235236#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \237u64 NAME##_ftype; \238SMU_CTYPE(TYPEID) NAME[SIZE]239240/* Init functions for scalar/array fields - init to 0xFFs */241#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \242do { \243obj->NAME##_ftype = \244AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \245obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \246count++; \247} while (0)248249#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \250do { \251obj->NAME##_ftype = \252AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \253memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \254count++; \255} while (0)256257/* Declare Metrics Class and Template object */258#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \259struct __packed CLASSNAME { \260struct metrics_table_header header; \261int attr_count; \262SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \263}; \264static inline void CLASSNAME##_init(struct CLASSNAME *obj, \265uint8_t frev, uint8_t crev) \266{ \267int count = 0; \268memset(obj, 0xFF, sizeof(*obj)); \269obj->header.format_revision = frev; \270obj->header.content_revision = crev; \271obj->header.structure_size = sizeof(*obj); \272SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \273SMU_METRICS_INIT_ARRAY) \274obj->attr_count = count; \275}276277#endif278#endif279280281