Path: blob/master/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include "gfx_v7_0.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "legacy_dpm.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static const struct amd_pm_funcs kv_dpm_funcs;

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);


static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

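/*
 * Inverse of kv_convert_vid2_to_vid7(): map a 7-bit voltage ID back to a
 * 2-bit index, using the BIOS vddc/sclk dependency table when it is
 * populated and falling back to the last mapping-table entry when no
 * exact match is found.
 */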
static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}

static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
				continue;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}

#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] = {
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] = {
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] = {
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] = {
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] = {
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] = {
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif

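/*
 * DIDT (di/dt) limiter setup for the SQ/DB/TD/TCP blocks.  Each row is
 * { offset, mask, shift, value, type } (see struct kv_pt_config_reg) and
 * the list is terminated by an offset of 0xFFFFFFFF; it is consumed by
 * kv_program_pt_config_registers() below.
 */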
static const struct kv_pt_config_reg didt_config_kv[] = {
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

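/*
 * Walk a 0xFFFFFFFF-terminated kv_pt_config_reg list and program each
 * entry through the matching register space (SMC-indexed, DIDT-indexed
 * or plain MMIO).  KV_CONFIGREG_CACHE entries are OR-accumulated and
 * folded into the next real register write.
 */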
static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

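/*
 * Ask the SMU firmware to enable or disable CAC; pi->cac_enabled tracks
 * whether the enable request was accepted so that the disable message is
 * only sent when CAC is actually active.
 */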
static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
					  &pi->graphics_voltage_change_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
					  &pi->graphics_interval,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
					  &pi->graphics_boot_level,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}


static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return
0;704}705706static void kv_dpm_power_level_enable(struct amdgpu_device *adev,707u32 index, bool enable)708{709struct kv_power_info *pi = kv_get_pi(adev);710711pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;712}713714static void kv_start_dpm(struct amdgpu_device *adev)715{716u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);717718tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;719WREG32_SMC(ixGENERAL_PWRMGT, tmp);720721amdgpu_kv_smc_dpm_enable(adev, true);722}723724static void kv_stop_dpm(struct amdgpu_device *adev)725{726amdgpu_kv_smc_dpm_enable(adev, false);727}728729static void kv_start_am(struct amdgpu_device *adev)730{731u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);732733sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |734SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);735sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;736737WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);738}739740static void kv_reset_am(struct amdgpu_device *adev)741{742u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);743744sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |745SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);746747WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);748}749750static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)751{752return amdgpu_kv_notify_message_to_smu(adev, freeze ?753PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);754}755756static int kv_force_lowest_valid(struct amdgpu_device *adev)757{758return kv_force_dpm_lowest(adev);759}760761static int kv_unforce_levels(struct amdgpu_device *adev)762{763if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)764return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);765else766return kv_set_enabled_levels(adev);767}768769static int kv_update_sclk_t(struct amdgpu_device *adev)770{771struct kv_power_info *pi = kv_get_pi(adev);772u32 low_sclk_interrupt_t = 0;773int ret = 0;774775if (pi->caps_sclk_throttle_low_notification) {776low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);777778ret = amdgpu_kv_copy_bytes_to_smc(adev,779pi->dpm_table_start +780offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),781(u8 *)&low_sclk_interrupt_t,782sizeof(u32), pi->sram_end);783}784return ret;785}786787static int kv_program_bootup_state(struct amdgpu_device *adev)788{789struct kv_power_info *pi = kv_get_pi(adev);790u32 i;791struct amdgpu_clock_voltage_dependency_table *table =792&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;793794if (table && table->count) {795for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {796if (table->entries[i].clk == pi->boot_pl.sclk)797break;798}799800pi->graphics_boot_level = (u8)i;801kv_dpm_power_level_enable(adev, i, true);802} else {803struct sumo_sclk_voltage_mapping_table *table =804&pi->sys_info.sclk_voltage_mapping_table;805806if (table->num_max_dpm_entries == 0)807return -EINVAL;808809for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {810if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)811break;812}813814pi->graphics_boot_level = (u8)i;815kv_dpm_power_level_enable(adev, i, true);816}817return 0;818}819820static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)821{822struct kv_power_info *pi = kv_get_pi(adev);823int ret;824825pi->graphics_therm_throttle_enable = 1;826827ret = amdgpu_kv_copy_bytes_to_smc(adev,828pi->dpm_table_start +829offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),830&pi->graphics_therm_throttle_enable,831sizeof(u8), pi->sram_end);832833return ret;834}835836static int 
kv_upload_dpm_settings(struct amdgpu_device *adev)837{838struct kv_power_info *pi = kv_get_pi(adev);839int ret;840841ret = amdgpu_kv_copy_bytes_to_smc(adev,842pi->dpm_table_start +843offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),844(u8 *)&pi->graphics_level,845sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,846pi->sram_end);847848if (ret)849return ret;850851ret = amdgpu_kv_copy_bytes_to_smc(adev,852pi->dpm_table_start +853offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),854&pi->graphics_dpm_level_count,855sizeof(u8), pi->sram_end);856857return ret;858}859860static u32 kv_get_clock_difference(u32 a, u32 b)861{862return (a >= b) ? a - b : b - a;863}864865static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)866{867struct kv_power_info *pi = kv_get_pi(adev);868u32 value;869870if (pi->caps_enable_dfs_bypass) {871if (kv_get_clock_difference(clk, 40000) < 200)872value = 3;873else if (kv_get_clock_difference(clk, 30000) < 200)874value = 2;875else if (kv_get_clock_difference(clk, 20000) < 200)876value = 7;877else if (kv_get_clock_difference(clk, 15000) < 200)878value = 6;879else if (kv_get_clock_difference(clk, 10000) < 200)880value = 8;881else882value = 0;883} else {884value = 0;885}886887return value;888}889890static int kv_populate_uvd_table(struct amdgpu_device *adev)891{892struct kv_power_info *pi = kv_get_pi(adev);893struct amdgpu_uvd_clock_voltage_dependency_table *table =894&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;895struct atom_clock_dividers dividers;896int ret;897u32 i;898899if (table == NULL || table->count == 0)900return 0;901902pi->uvd_level_count = 0;903for (i = 0; i < table->count; i++) {904if (pi->high_voltage_t &&905(pi->high_voltage_t < table->entries[i].v))906break;907908pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);909pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);910pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);911912pi->uvd_level[i].VClkBypassCntl =913(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);914pi->uvd_level[i].DClkBypassCntl =915(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);916917ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,918table->entries[i].vclk, false, ÷rs);919if (ret)920return ret;921pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;922923ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,924table->entries[i].dclk, false, ÷rs);925if (ret)926return ret;927pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;928929pi->uvd_level_count++;930}931932ret = amdgpu_kv_copy_bytes_to_smc(adev,933pi->dpm_table_start +934offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),935(u8 *)&pi->uvd_level_count,936sizeof(u8), pi->sram_end);937if (ret)938return ret;939940pi->uvd_interval = 1;941942ret = amdgpu_kv_copy_bytes_to_smc(adev,943pi->dpm_table_start +944offsetof(SMU7_Fusion_DpmTable, UVDInterval),945&pi->uvd_interval,946sizeof(u8), pi->sram_end);947if (ret)948return ret;949950ret = amdgpu_kv_copy_bytes_to_smc(adev,951pi->dpm_table_start +952offsetof(SMU7_Fusion_DpmTable, UvdLevel),953(u8 *)&pi->uvd_level,954sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,955pi->sram_end);956957return ret;958959}960961static int kv_populate_vce_table(struct amdgpu_device *adev)962{963struct kv_power_info *pi = kv_get_pi(adev);964int ret;965u32 i;966struct amdgpu_vce_clock_voltage_dependency_table *table =967&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;968struct atom_clock_dividers dividers;969970if (table == NULL || 
table->count == 0)971return 0;972973pi->vce_level_count = 0;974for (i = 0; i < table->count; i++) {975if (pi->high_voltage_t &&976pi->high_voltage_t < table->entries[i].v)977break;978979pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);980pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);981982pi->vce_level[i].ClkBypassCntl =983(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);984985ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,986table->entries[i].evclk, false, ÷rs);987if (ret)988return ret;989pi->vce_level[i].Divider = (u8)dividers.post_div;990991pi->vce_level_count++;992}993994ret = amdgpu_kv_copy_bytes_to_smc(adev,995pi->dpm_table_start +996offsetof(SMU7_Fusion_DpmTable, VceLevelCount),997(u8 *)&pi->vce_level_count,998sizeof(u8),999pi->sram_end);1000if (ret)1001return ret;10021003pi->vce_interval = 1;10041005ret = amdgpu_kv_copy_bytes_to_smc(adev,1006pi->dpm_table_start +1007offsetof(SMU7_Fusion_DpmTable, VCEInterval),1008(u8 *)&pi->vce_interval,1009sizeof(u8),1010pi->sram_end);1011if (ret)1012return ret;10131014ret = amdgpu_kv_copy_bytes_to_smc(adev,1015pi->dpm_table_start +1016offsetof(SMU7_Fusion_DpmTable, VceLevel),1017(u8 *)&pi->vce_level,1018sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,1019pi->sram_end);10201021return ret;1022}10231024static int kv_populate_samu_table(struct amdgpu_device *adev)1025{1026struct kv_power_info *pi = kv_get_pi(adev);1027struct amdgpu_clock_voltage_dependency_table *table =1028&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;1029struct atom_clock_dividers dividers;1030int ret;1031u32 i;10321033if (table == NULL || table->count == 0)1034return 0;10351036pi->samu_level_count = 0;1037for (i = 0; i < table->count; i++) {1038if (pi->high_voltage_t &&1039pi->high_voltage_t < table->entries[i].v)1040break;10411042pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);1043pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);10441045pi->samu_level[i].ClkBypassCntl =1046(u8)kv_get_clk_bypass(adev, table->entries[i].clk);10471048ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,1049table->entries[i].clk, false, ÷rs);1050if (ret)1051return ret;1052pi->samu_level[i].Divider = (u8)dividers.post_div;10531054pi->samu_level_count++;1055}10561057ret = amdgpu_kv_copy_bytes_to_smc(adev,1058pi->dpm_table_start +1059offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),1060(u8 *)&pi->samu_level_count,1061sizeof(u8),1062pi->sram_end);1063if (ret)1064return ret;10651066pi->samu_interval = 1;10671068ret = amdgpu_kv_copy_bytes_to_smc(adev,1069pi->dpm_table_start +1070offsetof(SMU7_Fusion_DpmTable, SAMUInterval),1071(u8 *)&pi->samu_interval,1072sizeof(u8),1073pi->sram_end);1074if (ret)1075return ret;10761077ret = amdgpu_kv_copy_bytes_to_smc(adev,1078pi->dpm_table_start +1079offsetof(SMU7_Fusion_DpmTable, SamuLevel),1080(u8 *)&pi->samu_level,1081sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,1082pi->sram_end);1083if (ret)1084return ret;10851086return ret;1087}108810891090static int kv_populate_acp_table(struct amdgpu_device *adev)1091{1092struct kv_power_info *pi = kv_get_pi(adev);1093struct amdgpu_clock_voltage_dependency_table *table =1094&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;1095struct atom_clock_dividers dividers;1096int ret;1097u32 i;10981099if (table == NULL || table->count == 0)1100return 0;11011102pi->acp_level_count = 0;1103for (i = 0; i < table->count; i++) {1104pi->acp_level[i].Frequency = 
cpu_to_be32(table->entries[i].clk);1105pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);11061107ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,1108table->entries[i].clk, false, ÷rs);1109if (ret)1110return ret;1111pi->acp_level[i].Divider = (u8)dividers.post_div;11121113pi->acp_level_count++;1114}11151116ret = amdgpu_kv_copy_bytes_to_smc(adev,1117pi->dpm_table_start +1118offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),1119(u8 *)&pi->acp_level_count,1120sizeof(u8),1121pi->sram_end);1122if (ret)1123return ret;11241125pi->acp_interval = 1;11261127ret = amdgpu_kv_copy_bytes_to_smc(adev,1128pi->dpm_table_start +1129offsetof(SMU7_Fusion_DpmTable, ACPInterval),1130(u8 *)&pi->acp_interval,1131sizeof(u8),1132pi->sram_end);1133if (ret)1134return ret;11351136ret = amdgpu_kv_copy_bytes_to_smc(adev,1137pi->dpm_table_start +1138offsetof(SMU7_Fusion_DpmTable, AcpLevel),1139(u8 *)&pi->acp_level,1140sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,1141pi->sram_end);1142if (ret)1143return ret;11441145return ret;1146}11471148static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)1149{1150struct kv_power_info *pi = kv_get_pi(adev);1151u32 i;1152struct amdgpu_clock_voltage_dependency_table *table =1153&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;11541155if (table && table->count) {1156for (i = 0; i < pi->graphics_dpm_level_count; i++) {1157if (pi->caps_enable_dfs_bypass) {1158if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)1159pi->graphics_level[i].ClkBypassCntl = 3;1160else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)1161pi->graphics_level[i].ClkBypassCntl = 2;1162else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)1163pi->graphics_level[i].ClkBypassCntl = 7;1164else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)1165pi->graphics_level[i].ClkBypassCntl = 6;1166else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)1167pi->graphics_level[i].ClkBypassCntl = 8;1168else1169pi->graphics_level[i].ClkBypassCntl = 0;1170} else {1171pi->graphics_level[i].ClkBypassCntl = 0;1172}1173}1174} else {1175struct sumo_sclk_voltage_mapping_table *table =1176&pi->sys_info.sclk_voltage_mapping_table;1177for (i = 0; i < pi->graphics_dpm_level_count; i++) {1178if (pi->caps_enable_dfs_bypass) {1179if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)1180pi->graphics_level[i].ClkBypassCntl = 3;1181else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)1182pi->graphics_level[i].ClkBypassCntl = 2;1183else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)1184pi->graphics_level[i].ClkBypassCntl = 7;1185else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)1186pi->graphics_level[i].ClkBypassCntl = 6;1187else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)1188pi->graphics_level[i].ClkBypassCntl = 8;1189else1190pi->graphics_level[i].ClkBypassCntl = 0;1191} else {1192pi->graphics_level[i].ClkBypassCntl = 0;1193}1194}1195}1196}11971198static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)1199{1200return amdgpu_kv_notify_message_to_smu(adev, enable ?1201PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);1202}12031204static void kv_reset_acp_boot_level(struct amdgpu_device *adev)1205{1206struct kv_power_info *pi = kv_get_pi(adev);12071208pi->acp_boot_level = 0xff;1209}12101211static void kv_update_current_ps(struct amdgpu_device *adev,1212struct amdgpu_ps 
*rps)1213{1214struct kv_ps *new_ps = kv_get_ps(rps);1215struct kv_power_info *pi = kv_get_pi(adev);12161217pi->current_rps = *rps;1218pi->current_ps = *new_ps;1219pi->current_rps.ps_priv = &pi->current_ps;1220adev->pm.dpm.current_ps = &pi->current_rps;1221}12221223static void kv_update_requested_ps(struct amdgpu_device *adev,1224struct amdgpu_ps *rps)1225{1226struct kv_ps *new_ps = kv_get_ps(rps);1227struct kv_power_info *pi = kv_get_pi(adev);12281229pi->requested_rps = *rps;1230pi->requested_ps = *new_ps;1231pi->requested_rps.ps_priv = &pi->requested_ps;1232adev->pm.dpm.requested_ps = &pi->requested_rps;1233}12341235static void kv_dpm_enable_bapm(void *handle, bool enable)1236{1237struct amdgpu_device *adev = (struct amdgpu_device *)handle;1238struct kv_power_info *pi = kv_get_pi(adev);1239int ret;12401241if (pi->bapm_enable) {1242ret = amdgpu_kv_smc_bapm_enable(adev, enable);1243if (ret)1244drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");1245}1246}12471248static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)1249{1250switch (sensor) {1251case THERMAL_TYPE_KV:1252return true;1253case THERMAL_TYPE_NONE:1254case THERMAL_TYPE_EXTERNAL:1255case THERMAL_TYPE_EXTERNAL_GPIO:1256default:1257return false;1258}1259}12601261static int kv_dpm_enable(struct amdgpu_device *adev)1262{1263struct kv_power_info *pi = kv_get_pi(adev);1264int ret;12651266ret = kv_process_firmware_header(adev);1267if (ret) {1268drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n");1269return ret;1270}1271kv_init_fps_limits(adev);1272kv_init_graphics_levels(adev);1273ret = kv_program_bootup_state(adev);1274if (ret) {1275drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n");1276return ret;1277}1278kv_calculate_dfs_bypass_settings(adev);1279ret = kv_upload_dpm_settings(adev);1280if (ret) {1281drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n");1282return ret;1283}1284ret = kv_populate_uvd_table(adev);1285if (ret) {1286drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n");1287return ret;1288}1289ret = kv_populate_vce_table(adev);1290if (ret) {1291drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n");1292return ret;1293}1294ret = kv_populate_samu_table(adev);1295if (ret) {1296drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n");1297return ret;1298}1299ret = kv_populate_acp_table(adev);1300if (ret) {1301drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n");1302return ret;1303}1304kv_program_vc(adev);1305#if 01306kv_initialize_hardware_cac_manager(adev);1307#endif1308kv_start_am(adev);1309if (pi->enable_auto_thermal_throttling) {1310ret = kv_enable_auto_thermal_throttling(adev);1311if (ret) {1312drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n");1313return ret;1314}1315}1316ret = kv_enable_dpm_voltage_scaling(adev);1317if (ret) {1318drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n");1319return ret;1320}1321ret = kv_set_dpm_interval(adev);1322if (ret) {1323drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n");1324return ret;1325}1326ret = kv_set_dpm_boot_state(adev);1327if (ret) {1328drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n");1329return ret;1330}1331ret = kv_enable_ulv(adev, true);1332if (ret) {1333drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n");1334return ret;1335}1336kv_start_dpm(adev);1337ret = kv_enable_didt(adev, true);1338if (ret) {1339drm_err(adev_to_drm(adev), "kv_enable_didt failed\n");1340return ret;1341}1342ret = kv_enable_smc_cac(adev, true);1343if (ret) 
{1344drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n");1345return ret;1346}13471348kv_reset_acp_boot_level(adev);13491350ret = amdgpu_kv_smc_bapm_enable(adev, false);1351if (ret) {1352drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");1353return ret;1354}13551356if (adev->irq.installed &&1357kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {1358ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);1359if (ret) {1360drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n");1361return ret;1362}1363amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,1364AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);1365amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,1366AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);1367}13681369return ret;1370}13711372static void kv_dpm_disable(struct amdgpu_device *adev)1373{1374struct kv_power_info *pi = kv_get_pi(adev);1375int err;13761377amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,1378AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);1379amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,1380AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);13811382err = amdgpu_kv_smc_bapm_enable(adev, false);1383if (err)1384drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");13851386if (adev->asic_type == CHIP_MULLINS)1387kv_enable_nb_dpm(adev, false);13881389/* powerup blocks */1390kv_dpm_powergate_acp(adev, false);1391kv_dpm_powergate_samu(adev, false);1392if (pi->caps_vce_pg) /* power on the VCE block */1393amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);1394if (pi->caps_uvd_pg) /* power on the UVD block */1395amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);13961397kv_enable_smc_cac(adev, false);1398kv_enable_didt(adev, false);1399kv_clear_vc(adev);1400kv_stop_dpm(adev);1401kv_enable_ulv(adev, false);1402kv_reset_am(adev);14031404kv_update_current_ps(adev, adev->pm.dpm.boot_ps);1405}14061407#if 01408static int kv_write_smc_soft_register(struct amdgpu_device *adev,1409u16 reg_offset, u32 value)1410{1411struct kv_power_info *pi = kv_get_pi(adev);14121413return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,1414(u8 *)&value, sizeof(u16), pi->sram_end);1415}14161417static int kv_read_smc_soft_register(struct amdgpu_device *adev,1418u16 reg_offset, u32 *value)1419{1420struct kv_power_info *pi = kv_get_pi(adev);14211422return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,1423value, pi->sram_end);1424}1425#endif14261427static void kv_init_sclk_t(struct amdgpu_device *adev)1428{1429struct kv_power_info *pi = kv_get_pi(adev);14301431pi->low_sclk_interrupt_t = 0;1432}14331434static int kv_init_fps_limits(struct amdgpu_device *adev)1435{1436struct kv_power_info *pi = kv_get_pi(adev);1437int ret = 0;14381439if (pi->caps_fps) {1440u16 tmp;14411442tmp = 45;1443pi->fps_high_t = cpu_to_be16(tmp);1444ret = amdgpu_kv_copy_bytes_to_smc(adev,1445pi->dpm_table_start +1446offsetof(SMU7_Fusion_DpmTable, FpsHighT),1447(u8 *)&pi->fps_high_t,1448sizeof(u16), pi->sram_end);14491450tmp = 30;1451pi->fps_low_t = cpu_to_be16(tmp);14521453ret = amdgpu_kv_copy_bytes_to_smc(adev,1454pi->dpm_table_start +1455offsetof(SMU7_Fusion_DpmTable, FpsLowT),1456(u8 *)&pi->fps_low_t,1457sizeof(u16), pi->sram_end);14581459}1460return ret;1461}14621463static void kv_init_powergate_state(struct amdgpu_device *adev)1464{1465struct kv_power_info *pi = kv_get_pi(adev);14661467pi->uvd_power_gated = false;1468pi->vce_power_gated = false;1469pi->samu_power_gated = false;1470pi->acp_power_gated = false;14711472}14731474static int kv_enable_uvd_dpm(struct 
amdgpu_device *adev, bool enable)1475{1476return amdgpu_kv_notify_message_to_smu(adev, enable ?1477PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);1478}14791480static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)1481{1482return amdgpu_kv_notify_message_to_smu(adev, enable ?1483PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);1484}14851486static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)1487{1488return amdgpu_kv_notify_message_to_smu(adev, enable ?1489PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);1490}14911492static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)1493{1494return amdgpu_kv_notify_message_to_smu(adev, enable ?1495PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);1496}14971498static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)1499{1500struct kv_power_info *pi = kv_get_pi(adev);1501struct amdgpu_uvd_clock_voltage_dependency_table *table =1502&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;1503int ret;1504u32 mask;15051506if (!gate) {1507if (table->count)1508pi->uvd_boot_level = table->count - 1;1509else1510pi->uvd_boot_level = 0;15111512if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {1513mask = 1 << pi->uvd_boot_level;1514} else {1515mask = 0x1f;1516}15171518ret = amdgpu_kv_copy_bytes_to_smc(adev,1519pi->dpm_table_start +1520offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),1521(uint8_t *)&pi->uvd_boot_level,1522sizeof(u8), pi->sram_end);1523if (ret)1524return ret;15251526amdgpu_kv_send_msg_to_smc_with_parameter(adev,1527PPSMC_MSG_UVDDPM_SetEnabledMask,1528mask);1529}15301531return kv_enable_uvd_dpm(adev, !gate);1532}15331534static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)1535{1536u8 i;1537struct amdgpu_vce_clock_voltage_dependency_table *table =1538&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;15391540for (i = 0; i < table->count; i++) {1541if (table->entries[i].evclk >= evclk)1542break;1543}15441545return i;1546}15471548static int kv_update_vce_dpm(struct amdgpu_device *adev,1549struct amdgpu_ps *amdgpu_new_state,1550struct amdgpu_ps *amdgpu_current_state)1551{1552struct kv_power_info *pi = kv_get_pi(adev);1553struct amdgpu_vce_clock_voltage_dependency_table *table =1554&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;1555int ret;15561557if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {1558if (pi->caps_stable_p_state)1559pi->vce_boot_level = table->count - 1;1560else1561pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);15621563ret = amdgpu_kv_copy_bytes_to_smc(adev,1564pi->dpm_table_start +1565offsetof(SMU7_Fusion_DpmTable, VceBootLevel),1566(u8 *)&pi->vce_boot_level,1567sizeof(u8),1568pi->sram_end);1569if (ret)1570return ret;15711572if (pi->caps_stable_p_state)1573amdgpu_kv_send_msg_to_smc_with_parameter(adev,1574PPSMC_MSG_VCEDPM_SetEnabledMask,1575(1 << pi->vce_boot_level));1576kv_enable_vce_dpm(adev, true);1577} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {1578kv_enable_vce_dpm(adev, false);1579}15801581return 0;1582}15831584static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)1585{1586struct kv_power_info *pi = kv_get_pi(adev);1587struct amdgpu_clock_voltage_dependency_table *table =1588&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;1589int ret;15901591if (!gate) {1592if (pi->caps_stable_p_state)1593pi->samu_boot_level = table->count - 1;1594else1595pi->samu_boot_level = 0;15961597ret = 
amdgpu_kv_copy_bytes_to_smc(adev,1598pi->dpm_table_start +1599offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),1600(u8 *)&pi->samu_boot_level,1601sizeof(u8),1602pi->sram_end);1603if (ret)1604return ret;16051606if (pi->caps_stable_p_state)1607amdgpu_kv_send_msg_to_smc_with_parameter(adev,1608PPSMC_MSG_SAMUDPM_SetEnabledMask,1609(1 << pi->samu_boot_level));1610}16111612return kv_enable_samu_dpm(adev, !gate);1613}16141615static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)1616{1617return 0;1618}16191620static void kv_update_acp_boot_level(struct amdgpu_device *adev)1621{1622struct kv_power_info *pi = kv_get_pi(adev);1623u8 acp_boot_level;16241625if (!pi->caps_stable_p_state) {1626acp_boot_level = kv_get_acp_boot_level(adev);1627if (acp_boot_level != pi->acp_boot_level) {1628pi->acp_boot_level = acp_boot_level;1629amdgpu_kv_send_msg_to_smc_with_parameter(adev,1630PPSMC_MSG_ACPDPM_SetEnabledMask,1631(1 << pi->acp_boot_level));1632}1633}1634}16351636static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)1637{1638struct kv_power_info *pi = kv_get_pi(adev);1639struct amdgpu_clock_voltage_dependency_table *table =1640&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;1641int ret;16421643if (!gate) {1644if (pi->caps_stable_p_state)1645pi->acp_boot_level = table->count - 1;1646else1647pi->acp_boot_level = kv_get_acp_boot_level(adev);16481649ret = amdgpu_kv_copy_bytes_to_smc(adev,1650pi->dpm_table_start +1651offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),1652(u8 *)&pi->acp_boot_level,1653sizeof(u8),1654pi->sram_end);1655if (ret)1656return ret;16571658if (pi->caps_stable_p_state)1659amdgpu_kv_send_msg_to_smc_with_parameter(adev,1660PPSMC_MSG_ACPDPM_SetEnabledMask,1661(1 << pi->acp_boot_level));1662}16631664return kv_enable_acp_dpm(adev, !gate);1665}16661667static void kv_dpm_powergate_uvd(void *handle, bool gate)1668{1669struct amdgpu_device *adev = (struct amdgpu_device *)handle;1670struct kv_power_info *pi = kv_get_pi(adev);16711672pi->uvd_power_gated = gate;16731674if (gate) {1675/* stop the UVD block */1676amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,1677AMD_PG_STATE_GATE);1678kv_update_uvd_dpm(adev, gate);1679if (pi->caps_uvd_pg)1680/* power off the UVD block */1681amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);1682} else {1683if (pi->caps_uvd_pg)1684/* power on the UVD block */1685amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);1686/* re-init the UVD block */1687kv_update_uvd_dpm(adev, gate);16881689amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,1690AMD_PG_STATE_UNGATE);1691}1692}16931694static void kv_dpm_powergate_vce(void *handle, bool gate)1695{1696struct amdgpu_device *adev = (struct amdgpu_device *)handle;1697struct kv_power_info *pi = kv_get_pi(adev);16981699pi->vce_power_gated = gate;17001701if (gate) {1702/* stop the VCE block */1703amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,1704AMD_PG_STATE_GATE);1705kv_enable_vce_dpm(adev, false);1706if (pi->caps_vce_pg) /* power off the VCE block */1707amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);1708} else {1709if (pi->caps_vce_pg) /* power on the VCE block */1710amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);1711kv_enable_vce_dpm(adev, true);1712/* re-init the VCE block */1713amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,1714AMD_PG_STATE_UNGATE);1715}1716}171717181719static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)1720{1721struct kv_power_info *pi = 
kv_get_pi(adev);17221723if (pi->samu_power_gated == gate)1724return;17251726pi->samu_power_gated = gate;17271728if (gate) {1729kv_update_samu_dpm(adev, true);1730if (pi->caps_samu_pg)1731amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);1732} else {1733if (pi->caps_samu_pg)1734amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);1735kv_update_samu_dpm(adev, false);1736}1737}17381739static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)1740{1741struct kv_power_info *pi = kv_get_pi(adev);17421743if (pi->acp_power_gated == gate)1744return;17451746if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)1747return;17481749pi->acp_power_gated = gate;17501751if (gate) {1752kv_update_acp_dpm(adev, true);1753if (pi->caps_acp_pg)1754amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);1755} else {1756if (pi->caps_acp_pg)1757amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);1758kv_update_acp_dpm(adev, false);1759}1760}17611762static void kv_set_valid_clock_range(struct amdgpu_device *adev,1763struct amdgpu_ps *new_rps)1764{1765struct kv_ps *new_ps = kv_get_ps(new_rps);1766struct kv_power_info *pi = kv_get_pi(adev);1767u32 i;1768struct amdgpu_clock_voltage_dependency_table *table =1769&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;17701771if (table && table->count) {1772for (i = 0; i < pi->graphics_dpm_level_count; i++) {1773if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||1774(i == (pi->graphics_dpm_level_count - 1))) {1775pi->lowest_valid = i;1776break;1777}1778}17791780for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {1781if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)1782break;1783}1784pi->highest_valid = i;17851786if (pi->lowest_valid > pi->highest_valid) {1787if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >1788(table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))1789pi->highest_valid = pi->lowest_valid;1790else1791pi->lowest_valid = pi->highest_valid;1792}1793} else {1794struct sumo_sclk_voltage_mapping_table *table =1795&pi->sys_info.sclk_voltage_mapping_table;17961797for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {1798if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||1799i == (int)(pi->graphics_dpm_level_count - 1)) {1800pi->lowest_valid = i;1801break;1802}1803}18041805for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {1806if (table->entries[i].sclk_frequency <=1807new_ps->levels[new_ps->num_levels - 1].sclk)1808break;1809}1810pi->highest_valid = i;18111812if (pi->lowest_valid > pi->highest_valid) {1813if ((new_ps->levels[0].sclk -1814table->entries[pi->highest_valid].sclk_frequency) >1815(table->entries[pi->lowest_valid].sclk_frequency -1816new_ps->levels[new_ps->num_levels - 1].sclk))1817pi->highest_valid = pi->lowest_valid;1818else1819pi->lowest_valid = pi->highest_valid;1820}1821}1822}18231824static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,1825struct amdgpu_ps *new_rps)1826{1827struct kv_ps *new_ps = kv_get_ps(new_rps);1828struct kv_power_info *pi = kv_get_pi(adev);1829int ret = 0;1830u8 clk_bypass_cntl;18311832if (pi->caps_enable_dfs_bypass) {1833clk_bypass_cntl = new_ps->need_dfs_bypass ?1834pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;1835ret = amdgpu_kv_copy_bytes_to_smc(adev,1836(pi->dpm_table_start +1837offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +1838(pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +1839offsetof(SMU7_Fusion_GraphicsLevel, 
ClkBypassCntl)),1840&clk_bypass_cntl,1841sizeof(u8), pi->sram_end);1842}18431844return ret;1845}18461847static int kv_enable_nb_dpm(struct amdgpu_device *adev,1848bool enable)1849{1850struct kv_power_info *pi = kv_get_pi(adev);1851int ret = 0;18521853if (enable) {1854if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {1855ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);1856if (ret == 0)1857pi->nb_dpm_enabled = true;1858}1859} else {1860if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {1861ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);1862if (ret == 0)1863pi->nb_dpm_enabled = false;1864}1865}18661867return ret;1868}18691870static int kv_dpm_force_performance_level(void *handle,1871enum amd_dpm_forced_level level)1872{1873int ret;1874struct amdgpu_device *adev = (struct amdgpu_device *)handle;18751876if (level == AMD_DPM_FORCED_LEVEL_HIGH) {1877ret = kv_force_dpm_highest(adev);1878if (ret)1879return ret;1880} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {1881ret = kv_force_dpm_lowest(adev);1882if (ret)1883return ret;1884} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {1885ret = kv_unforce_levels(adev);1886if (ret)1887return ret;1888}18891890adev->pm.dpm.forced_level = level;18911892return 0;1893}18941895static int kv_dpm_pre_set_power_state(void *handle)1896{1897struct amdgpu_device *adev = (struct amdgpu_device *)handle;1898struct kv_power_info *pi = kv_get_pi(adev);1899struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;1900struct amdgpu_ps *new_ps = &requested_ps;19011902kv_update_requested_ps(adev, new_ps);19031904kv_apply_state_adjust_rules(adev,1905&pi->requested_rps,1906&pi->current_rps);19071908return 0;1909}19101911static int kv_dpm_set_power_state(void *handle)1912{1913struct amdgpu_device *adev = (struct amdgpu_device *)handle;1914struct kv_power_info *pi = kv_get_pi(adev);1915struct amdgpu_ps *new_ps = &pi->requested_rps;1916struct amdgpu_ps *old_ps = &pi->current_rps;1917int ret;19181919if (pi->bapm_enable) {1920ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);1921if (ret) {1922drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");1923return ret;1924}1925}19261927if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {1928if (pi->enable_dpm) {1929kv_set_valid_clock_range(adev, new_ps);1930kv_update_dfs_bypass_settings(adev, new_ps);1931ret = kv_calculate_ds_divider(adev);1932if (ret) {1933drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");1934return ret;1935}1936kv_calculate_nbps_level_settings(adev);1937kv_calculate_dpm_settings(adev);1938kv_force_lowest_valid(adev);1939kv_enable_new_levels(adev);1940kv_upload_dpm_settings(adev);1941kv_program_nbps_index_settings(adev, new_ps);1942kv_unforce_levels(adev);1943kv_set_enabled_levels(adev);1944kv_force_lowest_valid(adev);1945kv_unforce_levels(adev);19461947ret = kv_update_vce_dpm(adev, new_ps, old_ps);1948if (ret) {1949drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");1950return ret;1951}1952kv_update_sclk_t(adev);1953if (adev->asic_type == CHIP_MULLINS)1954kv_enable_nb_dpm(adev, true);1955}1956} else {1957if (pi->enable_dpm) {1958kv_set_valid_clock_range(adev, new_ps);1959kv_update_dfs_bypass_settings(adev, new_ps);1960ret = kv_calculate_ds_divider(adev);1961if (ret) {1962drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");1963return ret;1964}1965kv_calculate_nbps_level_settings(adev);1966kv_calculate_dpm_settings(adev);1967kv_freeze_sclk_dpm(adev, true);1968kv_upload_dpm_settings(adev);1969kv_program_nbps_index_settings(adev, 
new_ps);1970kv_freeze_sclk_dpm(adev, false);1971kv_set_enabled_levels(adev);1972ret = kv_update_vce_dpm(adev, new_ps, old_ps);1973if (ret) {1974drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");1975return ret;1976}1977kv_update_acp_boot_level(adev);1978kv_update_sclk_t(adev);1979kv_enable_nb_dpm(adev, true);1980}1981}19821983return 0;1984}19851986static void kv_dpm_post_set_power_state(void *handle)1987{1988struct amdgpu_device *adev = (struct amdgpu_device *)handle;1989struct kv_power_info *pi = kv_get_pi(adev);1990struct amdgpu_ps *new_ps = &pi->requested_rps;19911992kv_update_current_ps(adev, new_ps);1993}19941995static void kv_dpm_setup_asic(struct amdgpu_device *adev)1996{1997sumo_take_smu_control(adev, true);1998kv_init_powergate_state(adev);1999kv_init_sclk_t(adev);2000}20012002#if 02003static void kv_dpm_reset_asic(struct amdgpu_device *adev)2004{2005struct kv_power_info *pi = kv_get_pi(adev);20062007if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {2008kv_force_lowest_valid(adev);2009kv_init_graphics_levels(adev);2010kv_program_bootup_state(adev);2011kv_upload_dpm_settings(adev);2012kv_force_lowest_valid(adev);2013kv_unforce_levels(adev);2014} else {2015kv_init_graphics_levels(adev);2016kv_program_bootup_state(adev);2017kv_freeze_sclk_dpm(adev, true);2018kv_upload_dpm_settings(adev);2019kv_freeze_sclk_dpm(adev, false);2020kv_set_enabled_level(adev, pi->graphics_boot_level);2021}2022}2023#endif20242025static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,2026struct amdgpu_clock_and_voltage_limits *table)2027{2028struct kv_power_info *pi = kv_get_pi(adev);20292030if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {2031int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;2032table->sclk =2033pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;2034table->vddc =2035kv_convert_2bit_index_to_voltage(adev,2036pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);2037}20382039table->mclk = pi->sys_info.nbp_memory_clock[0];2040}20412042static void kv_patch_voltage_values(struct amdgpu_device *adev)2043{2044int i;2045struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =2046&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;2047struct amdgpu_vce_clock_voltage_dependency_table *vce_table =2048&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;2049struct amdgpu_clock_voltage_dependency_table *samu_table =2050&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;2051struct amdgpu_clock_voltage_dependency_table *acp_table =2052&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;20532054if (uvd_table->count) {2055for (i = 0; i < uvd_table->count; i++)2056uvd_table->entries[i].v =2057kv_convert_8bit_index_to_voltage(adev,2058uvd_table->entries[i].v);2059}20602061if (vce_table->count) {2062for (i = 0; i < vce_table->count; i++)2063vce_table->entries[i].v =2064kv_convert_8bit_index_to_voltage(adev,2065vce_table->entries[i].v);2066}20672068if (samu_table->count) {2069for (i = 0; i < samu_table->count; i++)2070samu_table->entries[i].v =2071kv_convert_8bit_index_to_voltage(adev,2072samu_table->entries[i].v);2073}20742075if (acp_table->count) {2076for (i = 0; i < acp_table->count; i++)2077acp_table->entries[i].v =2078kv_convert_8bit_index_to_voltage(adev,2079acp_table->entries[i].v);2080}20812082}20832084static void kv_construct_boot_state(struct amdgpu_device *adev)2085{2086struct kv_power_info *pi = kv_get_pi(adev);20872088pi->boot_pl.sclk = 
static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

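/*
 * Adjust the requested state before programming it: enforce a minimum SCLK,
 * honor the active VCE state's SCLK floor, clamp levels above the high
 * voltage limit, target ~75% of the max SCLK when a stable p-state is
 * requested, and choose NB p-state masks per ASIC.
 */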
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
			  new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

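/* Derive a deep-sleep divider ID for every valid graphics level from its SCLK. */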
static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(adev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	bool force_high;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

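/* Only the highest valid level gets the high display watermark. */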
static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
				break;

			kv_set_divider_value(adev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(adev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(adev, i, vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
			kv_set_vid(adev, i, table->entries[i].vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(adev, i, false);
}

static void kv_enable_new_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(adev, i, true);
	}
}

static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
	u32 new_mask = (1 << level);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 nbdpmconfig1;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
				  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
				(new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
				(new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
				(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
		WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

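/*
 * Program the thermal interrupt trip points; the DIG_THERM_INTH/INTL fields
 * are encoded as (temperature in degrees C + 49).
 */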
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
	tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
		 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
	tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
	       ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(adev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(adev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(adev,
						    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

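/* ATOM PowerPlay table layouts consumed by kv_parse_power_table() below. */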
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct amdgpu_device *adev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

static int kv_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct amdgpu_ps),
				  GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL)
			return -ENOMEM;
		adev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

static int kv_dpm_init(struct amdgpu_device *adev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (amdgpu_bapm == 0)
		pi->bapm_enable = false;
	else
		pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
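	/* Block power-gating capabilities follow the ASIC's pg_flags. */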
	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(adev);
	if (ret)
		return ret;

	kv_patch_voltage_values(adev);
	kv_construct_boot_state(adev);

	ret = kv_parse_power_table(adev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

static void
kv_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
		       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

static void
kv_dpm_print_power_state(void *handle, void *request_ps)
{
	int i;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct kv_ps *ps = kv_get_ps(rps);
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
	amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
	drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n",
		rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		drm_dbg(adev_to_drm(adev),
			"power level %d sclk: %u vddc: %u\n",
			i, pl->sclk,
			kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}
	amdgpu_dpm_dbg_print_ps_status(adev, rps);
}

static void kv_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}

static void kv_dpm_display_configuration_changed(void *handle)
{

}

static u32 kv_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

static u32 kv_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	return pi->sys_info.bootup_uma_clk;
}

/* get temperature in millidegrees */
static int kv_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

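/* amdgpu IP block hooks: register the legacy powerplay callbacks and thermal IRQ handling. */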
static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->powerplay.pp_funcs = &kv_dpm_funcs;
	adev->powerplay.pp_handle = adev;
	kv_dpm_set_irq_funcs(adev);

	return 0;
}

static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block)
{
	/* powerdown unused blocks for now */
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->pm.dpm_enabled)
		return 0;

	kv_dpm_powergate_acp(adev, true);
	kv_dpm_powergate_samu(adev, true);

	return 0;
}

static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	ret = kv_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	drm_info(adev_to_drm(adev), "dpm initialized\n");

	return 0;

dpm_failed:
	kv_dpm_fini(adev);
	drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret);
	return ret;
}

static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	flush_work(&adev->pm.dpm.thermal.work);

	kv_dpm_fini(adev);

	return 0;
}

static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	amdgpu_legacy_dpm_compute_clocks(adev);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.dpm_enabled)
		kv_dpm_disable(adev);

	return 0;
}

static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_work_sync(&adev->pm.dpm.thermal.work);

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm_enabled = false;
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
	int ret = 0;
	struct amdgpu_device *adev = ip_block->adev;

	if (!amdgpu_dpm)
		return 0;

	if (!adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* asic init will reset to the boot state */
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
		if (ret) {
			adev->pm.dpm_enabled = false;
		} else {
			adev->pm.dpm_enabled = true;
			amdgpu_legacy_dpm_compute_clocks(adev);
		}
		mutex_unlock(&adev->pm.mutex);
	}
	return ret;
}

static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

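/* Toggle the CG_THERMAL_INT_CTRL mask bits for the two thermal trip directions. */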
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int kv_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					enum amd_clockgating_state state)
{
	return 0;
}

static int kv_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
					enum amd_powergating_state state)
{
	return 0;
}

static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
					     const struct kv_pl *kv_cpl2)
{
	return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
		(kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
		(kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
		(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}

static int kv_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct kv_ps *kv_cps;
	struct kv_ps *kv_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	kv_cps = kv_get_ps(cps);
	kv_rps = kv_get_ps(rps);

	if (kv_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (kv_cps->num_levels != kv_rps->num_levels) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < kv_cps->num_levels; i++) {
		if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
					       &(kv_rps->levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

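/* Report the current SCLK (from the SMU's profile index) or the GPU temperature. */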
static int kv_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	uint32_t sclk;
	u32 pl_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
			sclk = be32_to_cpu(
				pi->graphics_level[pl_index].SclkFrequency);
			*((uint32_t *)value) = sclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = kv_dpm_get_temp(adev);
		*size = 4;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int kv_set_powergating_by_smu(void *handle,
				     uint32_t block_type,
				     bool gate,
				     int inst)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		kv_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		kv_dpm_powergate_vce(handle, gate);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version kv_smu_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &kv_dpm_ip_funcs,
};

static const struct amd_pm_funcs kv_dpm_funcs = {
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.set_powergating_by_smu = kv_set_powergating_by_smu,
	.enable_bapm = &kv_dpm_enable_bapm,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.check_state_equal = kv_check_state_equal,
	.read_sensor = &kv_dpm_read_sensor,
	.pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

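/* Route the thermal interrupt source to the handlers above. */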
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}