/* drivers/crypto/hisilicon/hpre/hpre_main.c */
// SPDX-License-Identifier: GPL-2.01/* Copyright (c) 2018-2019 HiSilicon Limited. */2#include <linux/acpi.h>3#include <linux/bitops.h>4#include <linux/debugfs.h>5#include <linux/init.h>6#include <linux/io.h>7#include <linux/kernel.h>8#include <linux/module.h>9#include <linux/pci.h>10#include <linux/pm_runtime.h>11#include <linux/topology.h>12#include <linux/uacce.h>13#include "hpre.h"1415#define CAP_FILE_PERMISSION 044416#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)17#define HPRE_CTRL_CNT_CLR_CE 0x30100018#define HPRE_FSM_MAX_CNT 0x30100819#define HPRE_VFG_AXQOS 0x30100c20#define HPRE_VFG_AXCACHE 0x30101021#define HPRE_RDCHN_INI_CFG 0x30101422#define HPRE_AWUSR_FP_CFG 0x30101823#define HPRE_BD_ENDIAN 0x30102024#define HPRE_ECC_BYPASS 0x30102425#define HPRE_RAS_WIDTH_CFG 0x30102826#define HPRE_POISON_BYPASS 0x30102c27#define HPRE_BD_ARUSR_CFG 0x30103028#define HPRE_BD_AWUSR_CFG 0x30103429#define HPRE_TYPES_ENB 0x30103830#define HPRE_RSA_ENB BIT(0)31#define HPRE_ECC_ENB BIT(1)32#define HPRE_DATA_RUSER_CFG 0x30103c33#define HPRE_DATA_WUSER_CFG 0x30104034#define HPRE_INT_MASK 0x30140035#define HPRE_INT_STATUS 0x30180036#define HPRE_HAC_INT_MSK 0x30140037#define HPRE_HAC_RAS_CE_ENB 0x30141038#define HPRE_HAC_RAS_NFE_ENB 0x30141439#define HPRE_HAC_RAS_FE_ENB 0x30141840#define HPRE_HAC_INT_SET 0x30150041#define HPRE_RNG_TIMEOUT_NUM 0x301A3442#define HPRE_CORE_INT_ENABLE 043#define HPRE_RDCHN_INI_ST 0x301a0044#define HPRE_CLSTR_BASE 0x30200045#define HPRE_CORE_EN_OFFSET 0x0446#define HPRE_CORE_INI_CFG_OFFSET 0x2047#define HPRE_CORE_INI_STATUS_OFFSET 0x8048#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c49#define HPRE_CORE_IS_SCHD_OFFSET 0x905051#define HPRE_RAS_CE_ENB 0x30141052#define HPRE_RAS_NFE_ENB 0x30141453#define HPRE_RAS_FE_ENB 0x30141854#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c55#define HPRE_HAC_RAS_FE_ENABLE 05657#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)58#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)59#define HPRE_CORE_INI_STATUS 
(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)60#define HPRE_HAC_ECC1_CNT 0x301a0461#define HPRE_HAC_ECC2_CNT 0x301a0862#define HPRE_HAC_SOURCE_INT 0x30160063#define HPRE_CLSTR_ADDR_INTRVL 0x100064#define HPRE_CLUSTER_INQURY 0x10065#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x10466#define HPRE_PASID_EN_BIT 967#define HPRE_REG_RD_INTVRL_US 1068#define HPRE_REG_RD_TMOUT_US 100069#define HPRE_DBGFS_VAL_MAX_LEN 2070#define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa25871#define HPRE_QM_USR_CFG_MASK GENMASK(31, 1)72#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)73#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)74#define HPRE_BD_USR_MASK GENMASK(1, 0)75#define HPRE_PREFETCH_CFG 0x30113076#define HPRE_SVA_PREFTCH_DFX 0x30115C77#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))78#define HPRE_PREFETCH_DISABLE BIT(30)79#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))8081/* clock gate */82#define HPRE_CLKGATE_CTL 0x301a1083#define HPRE_PEH_CFG_AUTO_GATE 0x301a2c84#define HPRE_CLUSTER_DYN_CTL 0x30201085#define HPRE_CORE_SHB_CFG 0x30208886#define HPRE_CLKGATE_CTL_EN BIT(0)87#define HPRE_PEH_CFG_AUTO_GATE_EN BIT(0)88#define HPRE_CLUSTER_DYN_CTL_EN BIT(0)89#define HPRE_CORE_GATE_EN (BIT(30) | BIT(31))9091#define HPRE_AM_OOO_SHUTDOWN_ENB 0x30104492#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)93#define HPRE_WR_MSI_PORT BIT(2)9495#define HPRE_CORE_ECC_2BIT_ERR BIT(1)96#define HPRE_OOO_ECC_2BIT_ERR BIT(5)9798#define HPRE_QM_BME_FLR BIT(7)99#define HPRE_QM_PM_FLR BIT(11)100#define HPRE_QM_SRIOV_FLR BIT(12)101102#define HPRE_SHAPER_TYPE_RATE 640103#define HPRE_VIA_MSI_DSM 1104#define HPRE_SQE_MASK_OFFSET 8105#define HPRE_SQE_MASK_LEN 44106#define HPRE_CTX_Q_NUM_DEF 1107108#define HPRE_DFX_BASE 0x301000109#define HPRE_DFX_COMMON1 0x301400110#define HPRE_DFX_COMMON2 0x301A00111#define HPRE_DFX_CORE 0x302000112#define HPRE_DFX_BASE_LEN 0x55113#define HPRE_DFX_COMMON1_LEN 0x41114#define HPRE_DFX_COMMON2_LEN 0xE115#define HPRE_DFX_CORE_LEN 0x43116117static const char hpre_name[] = "hisi_hpre";118static struct dentry 
*hpre_debugfs_root;119static const struct pci_device_id hpre_dev_ids[] = {120{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },121{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },122{ 0, }123};124125MODULE_DEVICE_TABLE(pci, hpre_dev_ids);126127struct hpre_hw_error {128u32 int_msk;129const char *msg;130};131132static const struct qm_dev_alg hpre_dev_algs[] = {133{134.alg_msk = BIT(0),135.alg = "rsa\n"136}, {137.alg_msk = BIT(1),138.alg = "dh\n"139}, {140.alg_msk = BIT(2),141.alg = "ecdh\n"142}, {143.alg_msk = BIT(3),144.alg = "ecdsa\n"145}, {146.alg_msk = BIT(4),147.alg = "sm2\n"148}, {149.alg_msk = BIT(5),150.alg = "x25519\n"151}, {152.alg_msk = BIT(6),153.alg = "x448\n"154}, {155/* sentinel */156}157};158159static struct hisi_qm_list hpre_devices = {160.register_to_crypto = hpre_algs_register,161.unregister_from_crypto = hpre_algs_unregister,162};163164static const char * const hpre_debug_file_name[] = {165[HPRE_CLEAR_ENABLE] = "rdclr_en",166[HPRE_CLUSTER_CTRL] = "cluster_ctrl",167};168169enum hpre_cap_type {170HPRE_QM_NFE_MASK_CAP,171HPRE_QM_RESET_MASK_CAP,172HPRE_QM_OOO_SHUTDOWN_MASK_CAP,173HPRE_QM_CE_MASK_CAP,174HPRE_NFE_MASK_CAP,175HPRE_RESET_MASK_CAP,176HPRE_OOO_SHUTDOWN_MASK_CAP,177HPRE_CE_MASK_CAP,178HPRE_CLUSTER_NUM_CAP,179HPRE_CORE_TYPE_NUM_CAP,180HPRE_CORE_NUM_CAP,181HPRE_CLUSTER_CORE_NUM_CAP,182HPRE_CORE_ENABLE_BITMAP_CAP,183HPRE_DRV_ALG_BITMAP_CAP,184HPRE_DEV_ALG_BITMAP_CAP,185HPRE_CORE1_ALG_BITMAP_CAP,186HPRE_CORE2_ALG_BITMAP_CAP,187HPRE_CORE3_ALG_BITMAP_CAP,188HPRE_CORE4_ALG_BITMAP_CAP,189HPRE_CORE5_ALG_BITMAP_CAP,190HPRE_CORE6_ALG_BITMAP_CAP,191HPRE_CORE7_ALG_BITMAP_CAP,192HPRE_CORE8_ALG_BITMAP_CAP,193HPRE_CORE9_ALG_BITMAP_CAP,194HPRE_CORE10_ALG_BITMAP_CAP195};196197static const struct hisi_qm_cap_info hpre_basic_info[] = {198{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},199{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},200{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, 
GENMASK(31, 0), 0x0, 0x4, 0x6C37},201{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},202{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},203{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},204{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},205{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},206{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},207{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},208{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},209{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},210{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},211{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},212{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},213{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},214{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},215{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},216{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},217{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},218{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},219{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},220{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},221{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},222{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}223};224225static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {226{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},227{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},228{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},229{HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 
0x1FFFC3E},230{HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},231{HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},232{HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},233{HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},234{HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},235{HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},236{HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},237{HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},238{HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},239{HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},240{HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},241{HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},242{HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},243{HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},244{HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},245{HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},246};247248static const struct hpre_hw_error hpre_hw_errors[] = {249{250.int_msk = BIT(0),251.msg = "core_ecc_1bit_err_int_set"252}, {253.int_msk = BIT(1),254.msg = "core_ecc_2bit_err_int_set"255}, {256.int_msk = BIT(2),257.msg = "dat_wb_poison_int_set"258}, {259.int_msk = BIT(3),260.msg = "dat_rd_poison_int_set"261}, {262.int_msk = BIT(4),263.msg = "bd_rd_poison_int_set"264}, {265.int_msk = BIT(5),266.msg = "ooo_ecc_2bit_err_int_set"267}, {268.int_msk = BIT(6),269.msg = "cluster1_shb_timeout_int_set"270}, {271.int_msk = BIT(7),272.msg = "cluster2_shb_timeout_int_set"273}, {274.int_msk = BIT(8),275.msg = "cluster3_shb_timeout_int_set"276}, {277.int_msk = BIT(9),278.msg = "cluster4_shb_timeout_int_set"279}, {280.int_msk = GENMASK(15, 10),281.msg = "ooo_rdrsp_err_int_set"282}, 
{283.int_msk = GENMASK(21, 16),284.msg = "ooo_wrrsp_err_int_set"285}, {286.int_msk = BIT(22),287.msg = "pt_rng_timeout_int_set"288}, {289.int_msk = BIT(23),290.msg = "sva_fsm_timeout_int_set"291}, {292.int_msk = BIT(24),293.msg = "sva_int_set"294}, {295/* sentinel */296}297};298299static const u64 hpre_cluster_offsets[] = {300[HPRE_CLUSTER0] =301HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,302[HPRE_CLUSTER1] =303HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,304[HPRE_CLUSTER2] =305HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,306[HPRE_CLUSTER3] =307HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,308};309310static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {311{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},312{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},313{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},314{"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},315{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},316};317318static const struct debugfs_reg32 hpre_com_dfx_regs[] = {319{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},320{"AXQOS ", HPRE_VFG_AXQOS},321{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},322{"BD_ENDIAN ", HPRE_BD_ENDIAN},323{"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},324{"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},325{"POISON_BYPASS ", HPRE_POISON_BYPASS},326{"BD_ARUSER ", HPRE_BD_ARUSR_CFG},327{"BD_AWUSER ", HPRE_BD_AWUSR_CFG},328{"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},329{"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},330{"INT_STATUS ", HPRE_INT_STATUS},331{"INT_MASK ", HPRE_HAC_INT_MSK},332{"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB},333{"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB},334{"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB},335{"INT_SET ", HPRE_HAC_INT_SET},336{"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM},337};338339static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {340"send_cnt",341"recv_cnt",342"send_fail_cnt",343"send_busy_cnt",344"over_thrhld_cnt",345"overtime_thrhld",346"invalid_req_cnt"347};348349/* define the HPRE's dfx regs region and region length 
*/350static struct dfx_diff_registers hpre_diff_regs[] = {351{352.reg_offset = HPRE_DFX_BASE,353.reg_len = HPRE_DFX_BASE_LEN,354}, {355.reg_offset = HPRE_DFX_COMMON1,356.reg_len = HPRE_DFX_COMMON1_LEN,357}, {358.reg_offset = HPRE_DFX_COMMON2,359.reg_len = HPRE_DFX_COMMON2_LEN,360}, {361.reg_offset = HPRE_DFX_CORE,362.reg_len = HPRE_DFX_CORE_LEN,363},364};365366static const struct hisi_qm_err_ini hpre_err_ini;367368bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)369{370u32 cap_val;371372cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;373if (alg & cap_val)374return true;375376return false;377}378379static int hpre_diff_regs_show(struct seq_file *s, void *unused)380{381struct hisi_qm *qm = s->private;382383hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,384ARRAY_SIZE(hpre_diff_regs));385386return 0;387}388389DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);390391static int hpre_com_regs_show(struct seq_file *s, void *unused)392{393hisi_qm_regs_dump(s, s->private);394395return 0;396}397398DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);399400static int hpre_cluster_regs_show(struct seq_file *s, void *unused)401{402hisi_qm_regs_dump(s, s->private);403404return 0;405}406407DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);408409static const struct kernel_param_ops hpre_uacce_mode_ops = {410.set = uacce_mode_set,411.get = param_get_int,412};413414/*415* uacce_mode = 0 means hpre only register to crypto,416* uacce_mode = 1 means hpre both register to crypto and uacce.417*/418static u32 uacce_mode = UACCE_MODE_NOUACCE;419module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);420MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);421422static bool pf_q_num_flag;423static int pf_q_num_set(const char *val, const struct kernel_param *kp)424{425pf_q_num_flag = true;426427return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);428}429430static const struct kernel_param_ops hpre_pf_q_num_ops = {431.set = pf_q_num_set,432.get = param_get_int,433};434435static 
u32 pf_q_num = HPRE_PF_DEF_Q_NUM;436module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);437MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");438439static const struct kernel_param_ops vfs_num_ops = {440.set = vfs_num_set,441.get = param_get_int,442};443444static u32 vfs_num;445module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);446MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");447448struct hisi_qp *hpre_create_qp(u8 type)449{450int node = cpu_to_node(raw_smp_processor_id());451struct hisi_qp *qp = NULL;452int ret;453454if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)455return NULL;456457/*458* type: 0 - RSA/DH. algorithm supported in V2,459* 1 - ECC algorithm in V3.460*/461ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);462if (!ret)463return qp;464465return NULL;466}467468static void hpre_config_pasid(struct hisi_qm *qm)469{470u32 val1, val2;471472if (qm->ver >= QM_HW_V3)473return;474475val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);476val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);477if (qm->use_sva) {478val1 |= BIT(HPRE_PASID_EN_BIT);479val2 |= BIT(HPRE_PASID_EN_BIT);480} else {481val1 &= ~BIT(HPRE_PASID_EN_BIT);482val2 &= ~BIT(HPRE_PASID_EN_BIT);483}484writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);485writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);486}487488static int hpre_cfg_by_dsm(struct hisi_qm *qm)489{490struct device *dev = &qm->pdev->dev;491union acpi_object *obj;492guid_t guid;493494if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {495dev_err(dev, "Hpre GUID failed\n");496return -EINVAL;497}498499/* Switch over to MSI handling due to non-standard PCI implementation */500obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,5010, HPRE_VIA_MSI_DSM, NULL);502if (!obj) {503dev_err(dev, "ACPI handle failed!\n");504return -EIO;505}506507ACPI_FREE(obj);508509return 0;510}511512static int hpre_set_cluster(struct hisi_qm 
*qm)513{514struct device *dev = &qm->pdev->dev;515u32 cluster_core_mask;516unsigned long offset;517u32 hpre_core_info;518u8 clusters_num;519u32 val = 0;520int ret, i;521522cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;523hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;524clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &525hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;526for (i = 0; i < clusters_num; i++) {527offset = i * HPRE_CLSTR_ADDR_INTRVL;528529/* clusters initiating */530writel(cluster_core_mask,531qm->io_base + offset + HPRE_CORE_ENB);532writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);533ret = readl_relaxed_poll_timeout(qm->io_base + offset +534HPRE_CORE_INI_STATUS, val,535((val & cluster_core_mask) ==536cluster_core_mask),537HPRE_REG_RD_INTVRL_US,538HPRE_REG_RD_TMOUT_US);539if (ret) {540dev_err(dev,541"cluster %d int st status timeout!\n", i);542return -ETIMEDOUT;543}544}545546return 0;547}548549/*550* For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).551* Or it may stay in D3 state when we bind and unbind hpre quickly,552* as it does FLR triggered by hardware.553*/554static void disable_flr_of_bme(struct hisi_qm *qm)555{556u32 val;557558val = readl(qm->io_base + QM_PEH_AXUSER_CFG);559val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);560val |= HPRE_QM_PM_FLR;561writel(val, qm->io_base + QM_PEH_AXUSER_CFG);562writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);563}564565static void hpre_open_sva_prefetch(struct hisi_qm *qm)566{567u32 val;568int ret;569570if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))571return;572573/* Enable prefetch */574val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);575val &= HPRE_PREFETCH_ENABLE;576writel(val, qm->io_base + HPRE_PREFETCH_CFG);577578ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,579val, !(val & HPRE_PREFETCH_DISABLE),580HPRE_REG_RD_INTVRL_US,581HPRE_REG_RD_TMOUT_US);582if 
(ret)583pci_err(qm->pdev, "failed to open sva prefetch\n");584}585586static void hpre_close_sva_prefetch(struct hisi_qm *qm)587{588u32 val;589int ret;590591if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))592return;593594val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);595val |= HPRE_PREFETCH_DISABLE;596writel(val, qm->io_base + HPRE_PREFETCH_CFG);597598ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,599val, !(val & HPRE_SVA_DISABLE_READY),600HPRE_REG_RD_INTVRL_US,601HPRE_REG_RD_TMOUT_US);602if (ret)603pci_err(qm->pdev, "failed to close sva prefetch\n");604}605606static void hpre_enable_clock_gate(struct hisi_qm *qm)607{608unsigned long offset;609u8 clusters_num, i;610u32 hpre_core_info;611u32 val;612613if (qm->ver < QM_HW_V3)614return;615616val = readl(qm->io_base + HPRE_CLKGATE_CTL);617val |= HPRE_CLKGATE_CTL_EN;618writel(val, qm->io_base + HPRE_CLKGATE_CTL);619620val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);621val |= HPRE_PEH_CFG_AUTO_GATE_EN;622writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);623624hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;625clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &626hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;627for (i = 0; i < clusters_num; i++) {628offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;629val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);630val |= HPRE_CLUSTER_DYN_CTL_EN;631writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);632633val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);634val |= HPRE_CORE_GATE_EN;635writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);636}637}638639static void hpre_disable_clock_gate(struct hisi_qm *qm)640{641unsigned long offset;642u8 clusters_num, i;643u32 hpre_core_info;644u32 val;645646if (qm->ver < QM_HW_V3)647return;648649val = readl(qm->io_base + HPRE_CLKGATE_CTL);650val &= ~HPRE_CLKGATE_CTL_EN;651writel(val, qm->io_base + HPRE_CLKGATE_CTL);652653val = readl(qm->io_base + 
HPRE_PEH_CFG_AUTO_GATE);654val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;655writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);656657hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;658clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &659hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;660for (i = 0; i < clusters_num; i++) {661offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;662val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);663val &= ~HPRE_CLUSTER_DYN_CTL_EN;664writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);665666val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);667val &= ~HPRE_CORE_GATE_EN;668writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);669}670}671672static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)673{674struct device *dev = &qm->pdev->dev;675u32 val;676int ret;677678/* disabel dynamic clock gate before sram init */679hpre_disable_clock_gate(qm);680681writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);682writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);683writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);684685if (qm->ver >= QM_HW_V3)686writel(HPRE_RSA_ENB | HPRE_ECC_ENB,687qm->io_base + HPRE_TYPES_ENB);688else689writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);690691writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);692writel(0x0, qm->io_base + HPRE_BD_ENDIAN);693writel(0x0, qm->io_base + HPRE_POISON_BYPASS);694writel(0x0, qm->io_base + HPRE_ECC_BYPASS);695696writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);697writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);698writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);699ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,700val & BIT(0),701HPRE_REG_RD_INTVRL_US,702HPRE_REG_RD_TMOUT_US);703if (ret) {704dev_err(dev, "read rd channel timeout fail!\n");705return -ETIMEDOUT;706}707708ret = hpre_set_cluster(qm);709if (ret)710return -ETIMEDOUT;711712/* This 
setting is only needed by Kunpeng 920. */713if (qm->ver == QM_HW_V2) {714ret = hpre_cfg_by_dsm(qm);715if (ret)716return ret;717718disable_flr_of_bme(qm);719}720721/* Config data buffer pasid needed by Kunpeng 920 */722hpre_config_pasid(qm);723724hpre_enable_clock_gate(qm);725726return ret;727}728729static void hpre_cnt_regs_clear(struct hisi_qm *qm)730{731unsigned long offset;732u32 hpre_core_info;733u8 clusters_num;734int i;735736/* clear clusterX/cluster_ctrl */737hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;738clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &739hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;740for (i = 0; i < clusters_num; i++) {741offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;742writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);743}744745/* clear rdclr_en */746writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);747748hisi_qm_debug_regs_clear(qm);749}750751static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)752{753u32 val1, val2;754755val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);756if (enable) {757val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;758val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,759HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);760} else {761val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;762val2 = 0x0;763}764765if (qm->ver > QM_HW_V2)766writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);767768writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);769}770771static void hpre_hw_error_disable(struct hisi_qm *qm)772{773u32 ce, nfe;774775ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);776nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);777778/* disable hpre hw error interrupts */779writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);780/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */781hpre_master_ooo_ctrl(qm, false);782}783784static void hpre_hw_error_enable(struct hisi_qm *qm)785{786u32 
ce, nfe, err_en;787788ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);789nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);790791/* clear HPRE hw error source if having */792writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);793794/* configure error type */795writel(ce, qm->io_base + HPRE_RAS_CE_ENB);796writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);797writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);798799/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */800hpre_master_ooo_ctrl(qm, true);801802/* enable hpre hw error interrupts */803err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;804writel(~err_en, qm->io_base + HPRE_INT_MASK);805}806807static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)808{809struct hpre *hpre = container_of(file->debug, struct hpre, debug);810811return &hpre->qm;812}813814static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)815{816struct hisi_qm *qm = hpre_file_to_qm(file);817818return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &819HPRE_CTRL_CNT_CLR_CE_BIT;820}821822static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)823{824struct hisi_qm *qm = hpre_file_to_qm(file);825u32 tmp;826827if (val != 1 && val != 0)828return -EINVAL;829830tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &831~HPRE_CTRL_CNT_CLR_CE_BIT) | val;832writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);833834return 0;835}836837static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)838{839struct hisi_qm *qm = hpre_file_to_qm(file);840int cluster_index = file->index - HPRE_CLUSTER_CTRL;841unsigned long offset = HPRE_CLSTR_BASE +842cluster_index * HPRE_CLSTR_ADDR_INTRVL;843844return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);845}846847static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)848{849struct hisi_qm *qm = hpre_file_to_qm(file);850int cluster_index = file->index - 
HPRE_CLUSTER_CTRL;851unsigned long offset = HPRE_CLSTR_BASE + cluster_index *852HPRE_CLSTR_ADDR_INTRVL;853854writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);855}856857static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,858size_t count, loff_t *pos)859{860struct hpre_debugfs_file *file = filp->private_data;861struct hisi_qm *qm = hpre_file_to_qm(file);862char tbuf[HPRE_DBGFS_VAL_MAX_LEN];863u32 val;864int ret;865866ret = hisi_qm_get_dfx_access(qm);867if (ret)868return ret;869870spin_lock_irq(&file->lock);871switch (file->type) {872case HPRE_CLEAR_ENABLE:873val = hpre_clear_enable_read(file);874break;875case HPRE_CLUSTER_CTRL:876val = hpre_cluster_inqry_read(file);877break;878default:879goto err_input;880}881spin_unlock_irq(&file->lock);882883hisi_qm_put_dfx_access(qm);884ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);885return simple_read_from_buffer(buf, count, pos, tbuf, ret);886887err_input:888spin_unlock_irq(&file->lock);889hisi_qm_put_dfx_access(qm);890return -EINVAL;891}892893static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,894size_t count, loff_t *pos)895{896struct hpre_debugfs_file *file = filp->private_data;897struct hisi_qm *qm = hpre_file_to_qm(file);898char tbuf[HPRE_DBGFS_VAL_MAX_LEN];899unsigned long val;900int len, ret;901902if (*pos != 0)903return 0;904905if (count >= HPRE_DBGFS_VAL_MAX_LEN)906return -ENOSPC;907908len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,909pos, buf, count);910if (len < 0)911return len;912913tbuf[len] = '\0';914if (kstrtoul(tbuf, 0, &val))915return -EFAULT;916917ret = hisi_qm_get_dfx_access(qm);918if (ret)919return ret;920921spin_lock_irq(&file->lock);922switch (file->type) {923case HPRE_CLEAR_ENABLE:924ret = hpre_clear_enable_write(file, val);925if (ret)926goto err_input;927break;928case HPRE_CLUSTER_CTRL:929hpre_cluster_inqry_write(file, val);930break;931default:932ret = -EINVAL;933goto err_input;934}935936ret = 
count;937938err_input:939spin_unlock_irq(&file->lock);940hisi_qm_put_dfx_access(qm);941return ret;942}943944static const struct file_operations hpre_ctrl_debug_fops = {945.owner = THIS_MODULE,946.open = simple_open,947.read = hpre_ctrl_debug_read,948.write = hpre_ctrl_debug_write,949};950951static int hpre_debugfs_atomic64_get(void *data, u64 *val)952{953struct hpre_dfx *dfx_item = data;954955*val = atomic64_read(&dfx_item->value);956957return 0;958}959960static int hpre_debugfs_atomic64_set(void *data, u64 val)961{962struct hpre_dfx *dfx_item = data;963struct hpre_dfx *hpre_dfx = NULL;964965if (dfx_item->type == HPRE_OVERTIME_THRHLD) {966hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;967atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);968} else if (val) {969return -EINVAL;970}971972atomic64_set(&dfx_item->value, val);973974return 0;975}976977DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,978hpre_debugfs_atomic64_set, "%llu\n");979980static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,981enum hpre_ctrl_dbgfs_file type, int indx)982{983struct hpre *hpre = container_of(qm, struct hpre, qm);984struct hpre_debug *dbg = &hpre->debug;985struct dentry *file_dir;986987if (dir)988file_dir = dir;989else990file_dir = qm->debug.debug_root;991992if (type >= HPRE_DEBUG_FILE_NUM)993return -EINVAL;994995spin_lock_init(&dbg->files[indx].lock);996dbg->files[indx].debug = dbg;997dbg->files[indx].type = type;998dbg->files[indx].index = indx;999debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,1000dbg->files + indx, &hpre_ctrl_debug_fops);10011002return 0;1003}10041005static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)1006{1007struct device *dev = &qm->pdev->dev;1008struct debugfs_regset32 *regset;10091010regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);1011if (!regset)1012return -ENOMEM;10131014regset->regs = hpre_com_dfx_regs;1015regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);1016regset->base = 
qm->io_base;1017regset->dev = dev;10181019debugfs_create_file("regs", 0444, qm->debug.debug_root,1020regset, &hpre_com_regs_fops);10211022return 0;1023}10241025static int hpre_cluster_debugfs_init(struct hisi_qm *qm)1026{1027struct device *dev = &qm->pdev->dev;1028char buf[HPRE_DBGFS_VAL_MAX_LEN];1029struct debugfs_regset32 *regset;1030struct dentry *tmp_d;1031u32 hpre_core_info;1032u8 clusters_num;1033int i, ret;10341035hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;1036clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &1037hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;1038for (i = 0; i < clusters_num; i++) {1039ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);1040if (ret >= HPRE_DBGFS_VAL_MAX_LEN)1041return -EINVAL;1042tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);10431044regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);1045if (!regset)1046return -ENOMEM;10471048regset->regs = hpre_cluster_dfx_regs;1049regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);1050regset->base = qm->io_base + hpre_cluster_offsets[i];1051regset->dev = dev;10521053debugfs_create_file("regs", 0444, tmp_d, regset,1054&hpre_cluster_regs_fops);1055ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,1056i + HPRE_CLUSTER_CTRL);1057if (ret)1058return ret;1059}10601061return 0;1062}10631064static int hpre_ctrl_debug_init(struct hisi_qm *qm)1065{1066int ret;10671068ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,1069HPRE_CLEAR_ENABLE);1070if (ret)1071return ret;10721073ret = hpre_pf_comm_regs_debugfs_init(qm);1074if (ret)1075return ret;10761077return hpre_cluster_debugfs_init(qm);1078}10791080static int hpre_cap_regs_show(struct seq_file *s, void *unused)1081{1082struct hisi_qm *qm = s->private;1083u32 i, size;10841085size = qm->cap_tables.qm_cap_size;1086for (i = 0; i < size; i++)1087seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,1088qm->cap_tables.qm_cap_table[i].cap_val);10891090size = 
qm->cap_tables.dev_cap_size;1091for (i = 0; i < size; i++)1092seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,1093qm->cap_tables.dev_cap_table[i].cap_val);10941095return 0;1096}10971098DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);10991100static void hpre_dfx_debug_init(struct hisi_qm *qm)1101{1102struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;1103struct hpre *hpre = container_of(qm, struct hpre, qm);1104struct hpre_dfx *dfx = hpre->debug.dfx;1105struct dentry *parent;1106int i;11071108parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);1109for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {1110dfx[i].type = i;1111debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],1112&hpre_atomic64_ops);1113}11141115if (qm->fun_type == QM_HW_PF && hpre_regs)1116debugfs_create_file("diff_regs", 0444, parent,1117qm, &hpre_diff_regs_fops);11181119debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,1120qm->debug.debug_root, qm, &hpre_cap_regs_fops);1121}11221123static int hpre_debugfs_init(struct hisi_qm *qm)1124{1125struct device *dev = &qm->pdev->dev;1126int ret;11271128ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));1129if (ret) {1130dev_warn(dev, "Failed to init HPRE diff regs!\n");1131return ret;1132}11331134qm->debug.debug_root = debugfs_create_dir(dev_name(dev),1135hpre_debugfs_root);1136qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;1137qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;11381139hisi_qm_debug_init(qm);11401141if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {1142ret = hpre_ctrl_debug_init(qm);1143if (ret)1144goto debugfs_remove;1145}11461147hpre_dfx_debug_init(qm);11481149return 0;11501151debugfs_remove:1152debugfs_remove_recursive(qm->debug.debug_root);1153hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));1154return ret;1155}11561157static void hpre_debugfs_exit(struct hisi_qm *qm)1158{1159debugfs_remove_recursive(qm->debug.debug_root);11601161hisi_qm_regs_debugfs_uninit(qm, 
ARRAY_SIZE(hpre_diff_regs));
}

/*
 * Read every HPRE capability register once at probe time and cache the
 * values in qm->cap_tables, so later users (debugfs, error handling)
 * never have to touch the hardware again.  Also sanity-checks the
 * reported cluster count against the driver's compile-time maximum.
 */
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *hpre_cap;
	struct device *dev = &qm->pdev->dev;
	u32 hpre_core_info;
	u8 clusters_num;
	size_t i, size;

	size = ARRAY_SIZE(hpre_cap_query_info);
	/* devm-managed: lifetime tied to the PCI device, no explicit free. */
	hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		hpre_cap[i].type = hpre_cap_query_info[i].type;
		hpre_cap[i].name = hpre_cap_query_info[i].name;
		hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
							    i, qm->cap_ver);
	}

	hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
		dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
			clusters_num, HPRE_CLUSTERS_NUM_MAX);
		return -EINVAL;
	}

	qm->cap_tables.dev_cap_table = hpre_cap;
	qm->cap_tables.dev_cap_size = size;

	return 0;
}

/*
 * Fill in the hisi_qm structure for this PCI function (PF or VF),
 * initialize the common QM layer, cache the capability registers and
 * register the supported crypto algorithms.  On any failure after
 * hisi_qm_init() the QM is uninitialized again before returning.
 */
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	/* PF vs VF is distinguished purely by the PCI device ID. */
	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
		qm->err_ini = &hpre_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init hpre qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = hpre_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

/*
 * Snapshot all common and per-cluster DFX registers into
 * qm->debug.last_words.  hpre_show_last_dfx_regs() later diffs the
 * live values against this snapshot during controller reset.
 */
static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, j, idx;

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	/* Layout: [common regs][cluster 0 regs][cluster 1 regs]... */
	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
				    com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     hpre_com_dfx_regs[i].offset);

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hpre_cluster_dfx_regs[j].offset);
		}
	}

	return 0;
}

/* Free the last-words snapshot (PF only; VFs never allocate it). */
static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int
com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dumps last word of the debugging registers during controller reset */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
				 hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			val = readl_relaxed(io_base +
					    hpre_cluster_dfx_regs[j].offset);
			/* Same index layout as hpre_show_last_regs_init(). */
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			if (debug->last_words[idx] != val)
				pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
					 i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val);
		}
	}
}

/* Log a warning for every bit of @err_sts that matches a known HW error. */
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	/* hpre_hw_errors is terminated by an entry with a NULL msg. */
	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_INT_STATUS);
}

/* Write-1-to-clear the reported error bits in the interrupt source register. */
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}

/* Mask @err_type out of the RAS NFE enable register to stop re-reporting. */
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask;

	nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}

/*
 * Re-open the AXI master out-of-order path by toggling the shutdown
 * enable bit low then high again.
 */
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

/*
 * Classify the current hardware error state: returns ACC_ERR_NEED_RESET
 * for errors in the device reset mask (after muting their reporting),
 * otherwise logs/clears the error and reports ACC_ERR_RECOVERED.
 */
static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hpre_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		hpre_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			hpre_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		hpre_clear_hw_err_status(qm, err_status);
	}

	return ACC_ERR_RECOVERED;
}

/* True if any error requiring device shutdown is currently asserted. */
static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hpre_get_hw_err_status(qm);
	if (err_status & qm->err_info.dev_shutdown_mask)
		return true;

	return false;
}

/*
 * Populate qm->err_info with the CE/NFE/FE masks and reset/shutdown
 * masks for this hardware revision, mostly read from the capability
 * tables.
 */
static void hpre_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							  HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
							 HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						      HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						       HPRE_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
	err_info->acpi_rst = "HRST";
}

/* Error-handling callbacks registered with the common hisi_qm framework. */
static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init = hpre_set_user_domain_and_cache,
	.hw_err_enable = hpre_hw_error_enable,
	.hw_err_disable = hpre_hw_error_disable,
	.get_dev_hw_err_status = hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.open_axi_master_ooo = hpre_open_axi_master_ooo,
	.open_sva_prefetch = hpre_open_sva_prefetch,
	.close_sva_prefetch = hpre_close_sva_prefetch,
	.show_last_dfx_regs = hpre_show_last_dfx_regs,
	.err_info_init = hpre_err_info_init,
	.get_err_result = hpre_get_err_result,
	.dev_is_abnormal = hpre_dev_is_abnormal,
};

/*
 * PF-only probe work: program the user domain/cache, enable SVA
 * prefetch and device error reporting, and take the DFX register
 * snapshot used for post-reset diffing.
 */
static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hpre_open_sva_prefetch(qm);

	hisi_qm_dev_err_init(qm);
	ret = hpre_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

/* Common probe init; on the PF also configures the QoS shaper (HW >= V3). */
static int hpre_probe_init(struct hpre *hpre)
{
	u32 type_rate = HPRE_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
		/* Enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

/* Undo hpre_probe_init(); no-op on VFs, which did no PF-side setup. */
static void hpre_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	hpre_cnt_regs_clear(qm);
	qm->debug.curr_qm_qp_num = 0;
	hpre_show_last_regs_uninit(qm);
	hpre_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}

/*
 * PCI probe: init the QM, configure the device, start queues, expose
 * debugfs, register crypto algorithms and (optionally) uacce and SR-IOV.
 * Failures unwind in strict reverse order via the labelled exits; a
 * debugfs failure is only warned about, not fatal.
 */
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_probe_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	hisi_qm_add_list(qm, &hpre_devices);
	ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_probe_init:
	hpre_probe_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

/* PCI remove: tear down in the reverse order of hpre_probe(). */
static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices,
HPRE_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	hpre_probe_uninit(qm);
	hisi_qm_uninit(qm);
}

/* Runtime PM hooks are shared with the common hisi_qm layer. */
static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

/* AER/reset callbacks, all delegated to the common hisi_qm layer. */
static const struct pci_error_handlers hpre_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name = hpre_name,
	.id_table = hpre_dev_ids,
	.probe = hpre_probe,
	.remove = hpre_remove,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			   hisi_qm_sriov_configure : NULL,
	.err_handler = &hpre_err_handler,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &hpre_pm_ops,
};

/* Exported so the uacce/qm code can locate the PF driver instance. */
struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

/* Module init: set up the device list and debugfs root, then register. */
static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <[email protected]>");
MODULE_AUTHOR("Meng Yu <[email protected]>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");