Path: drivers/crypto/hisilicon/sec/sec_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HiSilicon SEC units found on Hip06 Hip07
 *
 * Copyright (c) 2016-2017 HiSilicon Limited.
 */
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "sec_drv.h"

#define SEC_QUEUE_AR_FORCE_ALLOC	0
#define SEC_QUEUE_AR_FORCE_NOALLOC	1
#define SEC_QUEUE_AR_FORCE_DIS		2

#define SEC_QUEUE_AW_FORCE_ALLOC	0
#define SEC_QUEUE_AW_FORCE_NOALLOC	1
#define SEC_QUEUE_AW_FORCE_DIS		2

/* SEC_ALGSUB registers */
#define SEC_ALGSUB_CLK_EN_REG		0x03b8
#define SEC_ALGSUB_CLK_DIS_REG		0x03bc
#define SEC_ALGSUB_CLK_ST_REG		0x535c
#define SEC_ALGSUB_RST_REQ_REG		0x0aa8
#define SEC_ALGSUB_RST_DREQ_REG		0x0aac
#define SEC_ALGSUB_RST_ST_REG		0x5a54
#define SEC_ALGSUB_RST_ST_IS_RST	BIT(0)

#define SEC_ALGSUB_BUILD_RST_REQ_REG	0x0ab8
#define SEC_ALGSUB_BUILD_RST_DREQ_REG	0x0abc
#define SEC_ALGSUB_BUILD_RST_ST_REG	0x5a5c
#define SEC_ALGSUB_BUILD_RST_ST_IS_RST	BIT(0)

#define SEC_SAA_BASE			0x00001000UL

/* SEC_SAA registers */
#define SEC_SAA_CTRL_REG(x)		((x) * SEC_SAA_ADDR_SIZE)
#define SEC_SAA_CTRL_GET_QM_EN		BIT(0)

#define SEC_ST_INTMSK1_REG		0x0200
#define SEC_ST_RINT1_REG		0x0400
#define SEC_ST_INTSTS1_REG		0x0600
#define SEC_BD_MNG_STAT_REG		0x0800
#define SEC_PARSING_STAT_REG		0x0804
#define SEC_LOAD_TIME_OUT_CNT_REG	0x0808
#define SEC_CORE_WORK_TIME_OUT_CNT_REG	0x080c
#define SEC_BACK_TIME_OUT_CNT_REG	0x0810
#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG	0x0814
#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG	0x0818
#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG	0x081c
#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG	0x0820
#define SEC_SAA_ACC_REG			0x083c
#define SEC_BD_NUM_CNT_IN_SEC_REG	0x0858
#define SEC_LOAD_WORK_TIME_CNT_REG	0x0860
#define SEC_CORE_WORK_WORK_TIME_CNT_REG	0x0864
#define SEC_BACK_WORK_TIME_CNT_REG	0x0868
#define SEC_SAA_IDLE_TIME_CNT_REG	0x086c
#define SEC_SAA_CLK_CNT_REG		0x0870

/* SEC_COMMON registers */
#define SEC_CLK_EN_REG			0x0000
#define SEC_CTRL_REG			0x0004

#define SEC_COMMON_CNT_CLR_CE_REG	0x0008
#define SEC_COMMON_CNT_CLR_CE_CLEAR	BIT(0)
#define SEC_COMMON_CNT_CLR_CE_SNAP_EN	BIT(1)

#define SEC_SECURE_CTRL_REG		0x000c
#define SEC_AXI_CACHE_CFG_REG		0x0010
#define SEC_AXI_QOS_CFG_REG		0x0014
#define SEC_IPV4_MASK_TABLE_REG		0x0020
#define SEC_IPV6_MASK_TABLE_X_REG(x)	(0x0024 + (x) * 4)
#define SEC_FSM_MAX_CNT_REG		0x0064

#define SEC_CTRL2_REG			0x0068
#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M	GENMASK(3, 0)
#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S	0
#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M	GENMASK(6, 4)
#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S	4
#define SEC_CTRL2_CLK_GATE_EN		BIT(7)
#define SEC_CTRL2_ENDIAN_BD		BIT(8)
#define SEC_CTRL2_ENDIAN_BD_TYPE	BIT(9)

#define SEC_CNT_PRECISION_CFG_REG	0x006c
#define SEC_DEBUG_BD_CFG_REG		0x0070
#define SEC_DEBUG_BD_CFG_WB_NORMAL	BIT(0)
#define SEC_DEBUG_BD_CFG_WB_EN		BIT(1)

#define SEC_Q_SIGHT_SEL			0x0074
#define SEC_Q_SIGHT_HIS_CLR		0x0078
#define SEC_Q_VMID_CFG_REG(q)		(0x0100 + (q) * 4)
#define SEC_Q_WEIGHT_CFG_REG(q)		(0x200 + (q) * 4)
#define SEC_STAT_CLR_REG		0x0a00
#define SEC_SAA_IDLE_CNT_CLR_REG	0x0a04
#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG	0x0b00
#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG	0x0b04
#define SEC_QM_BD_DFX_CFG_REG		0x0b08
#define SEC_QM_BD_DFX_RESULT_REG	0x0b0c
#define SEC_QM_BDID_DFX_RESULT_REG	0x0b10
#define SEC_QM_BD_DFIFO_STATUS_REG	0x0b14
#define SEC_QM_BD_DFX_CFG2_REG		0x0b1c
#define SEC_QM_BD_DFX_RESULT2_REG	0x0b20
#define SEC_QM_BD_IDFIFO_STATUS_REG	0x0b18
#define SEC_QM_BD_DFIFO_STATUS2_REG	0x0b28
#define SEC_QM_BD_IDFIFO_STATUS2_REG	0x0b2c

#define SEC_HASH_IPV4_MASK		0xfff00000
#define SEC_MAX_SAA_NUM			0xa
#define SEC_SAA_ADDR_SIZE		0x1000

#define SEC_Q_INIT_REG			0x0
#define SEC_Q_INIT_WO_STAT_CLEAR	0x2
#define SEC_Q_INIT_AND_STAT_CLEAR	0x3

#define SEC_Q_CFG_REG			0x8
#define SEC_Q_CFG_REORDER		BIT(0)

#define SEC_Q_PROC_NUM_CFG_REG		0x10
#define SEC_QUEUE_ENB_REG		0x18

#define SEC_Q_DEPTH_CFG_REG		0x50
#define SEC_Q_DEPTH_CFG_DEPTH_M		GENMASK(11, 0)
#define SEC_Q_DEPTH_CFG_DEPTH_S		0

#define SEC_Q_BASE_HADDR_REG		0x54
#define SEC_Q_BASE_LADDR_REG		0x58
#define SEC_Q_WR_PTR_REG		0x5c
#define SEC_Q_OUTORDER_BASE_HADDR_REG	0x60
#define SEC_Q_OUTORDER_BASE_LADDR_REG	0x64
#define SEC_Q_OUTORDER_RD_PTR_REG	0x68
#define SEC_Q_OT_TH_REG			0x6c

#define SEC_Q_ARUSER_CFG_REG		0x70
#define SEC_Q_ARUSER_CFG_FA		BIT(0)
#define SEC_Q_ARUSER_CFG_FNA		BIT(1)
#define SEC_Q_ARUSER_CFG_RINVLD		BIT(2)
#define SEC_Q_ARUSER_CFG_PKG		BIT(3)

#define SEC_Q_AWUSER_CFG_REG		0x74
#define SEC_Q_AWUSER_CFG_FA		BIT(0)
#define SEC_Q_AWUSER_CFG_FNA		BIT(1)
#define SEC_Q_AWUSER_CFG_PKG		BIT(2)

#define SEC_Q_ERR_BASE_HADDR_REG	0x7c
#define SEC_Q_ERR_BASE_LADDR_REG	0x80
#define SEC_Q_CFG_VF_NUM_REG		0x84
#define SEC_Q_SOFT_PROC_PTR_REG		0x88
#define SEC_Q_FAIL_INT_MSK_REG		0x300
#define SEC_Q_FLOW_INT_MKS_REG		0x304
#define SEC_Q_FAIL_RINT_REG		0x400
#define SEC_Q_FLOW_RINT_REG		0x404
#define SEC_Q_FAIL_INT_STATUS_REG	0x500
#define SEC_Q_FLOW_INT_STATUS_REG	0x504
#define SEC_Q_STATUS_REG		0x600
#define SEC_Q_RD_PTR_REG		0x604
#define SEC_Q_PRO_PTR_REG		0x608
#define SEC_Q_OUTORDER_WR_PTR_REG	0x60c
#define SEC_Q_OT_CNT_STATUS_REG		0x610
#define SEC_Q_INORDER_BD_NUM_ST_REG	0x650
#define SEC_Q_INORDER_GET_FLAG_ST_REG	0x654
#define SEC_Q_INORDER_ADD_FLAG_ST_REG	0x658
#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG	0x65c
#define SEC_Q_RD_DONE_PTR_REG		0x660
#define SEC_Q_CPL_Q_BD_NUM_ST_REG	0x700
#define SEC_Q_CPL_Q_PTR_ST_REG		0x704
#define SEC_Q_CPL_Q_H_ADDR_ST_REG	0x708
#define SEC_Q_CPL_Q_L_ADDR_ST_REG	0x70c
#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG	0x710
#define SEC_Q_WRR_ID_CHECK_REG		0x714
#define SEC_Q_CPLQ_FULL_CHECK_REG	0x718
#define SEC_Q_SUCCESS_BD_CNT_REG	0x800
#define SEC_Q_FAIL_BD_CNT_REG		0x804
#define SEC_Q_GET_BD_CNT_REG		0x808
#define SEC_Q_IVLD_CNT_REG		0x80c
#define SEC_Q_BD_PROC_GET_CNT_REG	0x810
#define SEC_Q_BD_PROC_DONE_CNT_REG	0x814
#define SEC_Q_LAT_CLR_REG		0x850
#define SEC_Q_PKT_LAT_MAX_REG		0x854
#define SEC_Q_PKT_LAT_AVG_REG		0x858
#define SEC_Q_PKT_LAT_MIN_REG		0x85c
#define SEC_Q_ID_CLR_CFG_REG		0x900
#define SEC_Q_1ST_BD_ERR_ID_REG		0x904
#define SEC_Q_1ST_AUTH_FAIL_ID_REG	0x908
#define SEC_Q_1ST_RD_ERR_ID_REG		0x90c
#define SEC_Q_1ST_ECC2_ERR_ID_REG	0x910
#define SEC_Q_1ST_IVLD_ID_REG		0x914
#define SEC_Q_1ST_BD_WR_ERR_ID_REG	0x918
#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG	0x91c
#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG	0x920

struct sec_debug_bd_info {
#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M	GENMASK(22, 0)
	u32 soft_err_check;
#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M	GENMASK(9, 0)
	u32 hard_err_check;
	u32 icv_mac1st_word;
#define SEC_DEBUG_BD_INFO_GET_ID_M		GENMASK(19, 0)
	u32 sec_get_id;
	/* W4---W15 */
	u32 reserv_left[12];
};

struct sec_out_bd_info {
#define SEC_OUT_BD_INFO_Q_ID_M		GENMASK(11, 0)
#define SEC_OUT_BD_INFO_ECC_2BIT_ERR	BIT(14)
	u16 data;
};

#define SEC_MAX_DEVICES			8
static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
static DEFINE_MUTEX(sec_id_lock);

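/*
 * Each queue has its own register window. MEM resources 0 and 1 are the
 * shared register regions mapped in sec_map_io(), so queue N uses platform
 * MEM resource 2 + N.
 */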
static int sec_queue_map_io(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;
	struct resource *res;

	res = platform_get_resource(to_platform_device(dev),
				    IORESOURCE_MEM,
				    2 + queue->queue_id);
	if (!res) {
		dev_err(dev, "Failed to get queue %u memory resource\n",
			queue->queue_id);
		return -ENOMEM;
	}
	queue->regs = ioremap(res->start, resource_size(res));
	if (!queue->regs)
		return -ENOMEM;

	return 0;
}

static void sec_queue_unmap_io(struct sec_queue *queue)
{
	iounmap(queue->regs);
}

static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
{
	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (ar_pkg)
		regval |= SEC_Q_ARUSER_CFG_PKG;
	else
		regval &= ~SEC_Q_ARUSER_CFG_PKG;
	writel_relaxed(regval, addr);

	return 0;
}

static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
{
	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval |= SEC_Q_AWUSER_CFG_PKG;
	writel_relaxed(regval, addr);

	return 0;
}

static int sec_clk_en(struct sec_dev_info *info)
{
	void __iomem *base = info->regs[SEC_COMMON];
	u32 i = 0;

	writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
	do {
		usleep_range(1000, 10000);
		if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
			return 0;
		i++;
	} while (i < 10);
	dev_err(info->dev, "sec clock enable fail!\n");

	return -EIO;
}

static int sec_clk_dis(struct sec_dev_info *info)
{
	void __iomem *base = info->regs[SEC_COMMON];
	u32 i = 0;

	writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
	do {
		usleep_range(1000, 10000);
		if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
			return 0;
		i++;
	} while (i < 10);
	dev_err(info->dev, "sec clock disable fail!\n");

	return -EIO;
}

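/*
 * Reset is a two phase request/de-request handshake against both the main
 * and the build reset controls: assert the requests and poll until both
 * status registers report the unit in reset, then de-request and poll until
 * both leave reset again. Each phase gives up after roughly ten polls.
 */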
static int sec_reset_whole_module(struct sec_dev_info *info)
{
	void __iomem *base = info->regs[SEC_COMMON];
	bool is_reset, b_is_reset;
	u32 i = 0;

	writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
	writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
	while (1) {
		usleep_range(1000, 10000);
		is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
			SEC_ALGSUB_RST_ST_IS_RST;
		b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
			SEC_ALGSUB_BUILD_RST_ST_IS_RST;
		if (is_reset && b_is_reset)
			break;
		i++;
		if (i > 10) {
			dev_err(info->dev, "Reset req failed\n");
			return -EIO;
		}
	}

	i = 0;
	writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
	writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
	while (1) {
		usleep_range(1000, 10000);
		is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
			SEC_ALGSUB_RST_ST_IS_RST;
		b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
			SEC_ALGSUB_BUILD_RST_ST_IS_RST;
		if (!is_reset && !b_is_reset)
			break;

		i++;
		if (i > 10) {
			dev_err(info->dev, "Reset dreq failed\n");
			return -EIO;
		}
	}

	return 0;
}

static void sec_bd_endian_little(struct sec_dev_info *info)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
	writel_relaxed(regval, addr);
}

/*
 * sec_cache_config - configure optimum cache placement
 */
static void sec_cache_config(struct sec_dev_info *info)
{
	struct iommu_domain *domain;
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;

	domain = iommu_get_domain_for_dev(info->dev);

	/* Check that translation is occurring */
	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
		writel_relaxed(0x44cf9e, addr);
	else
		writel_relaxed(0x4cfd9, addr);
}

static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
	regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
		SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
	writel_relaxed(regval, addr);
}

static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
	regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
		SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
	writel_relaxed(regval, addr);
}

static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (clkgate)
		regval |= SEC_CTRL2_CLK_GATE_EN;
	else
		regval &= ~SEC_CTRL2_CLK_GATE_EN;
	writel_relaxed(regval, addr);
}

static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (clr_ce)
		regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
	else
		regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
	writel_relaxed(regval, addr);
}

static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (snap_en)
		regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
	else
		regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
	writel_relaxed(regval, addr);
}

static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
{
	void __iomem *base = info->regs[SEC_SAA];
	int i;

	for (i = 0; i < 10; i++)
		writel_relaxed(hash_mask[0],
			       base + SEC_IPV6_MASK_TABLE_X_REG(i));
}

static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
	if (hash_mask & SEC_HASH_IPV4_MASK) {
		dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n");
		return -EINVAL;
	}

	writel_relaxed(hash_mask,
		       info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);

	return 0;
}

static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	/* Always disable write back of normal bd */
	regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;

	if (cfg)
		regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
	else
		regval |= SEC_DEBUG_BD_CFG_WB_EN;

	writel_relaxed(regval, addr);
}

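/*
 * The SAA processing units appear to each have their own register window of
 * SEC_SAA_ADDR_SIZE (0x1000) bytes starting at SEC_SAA_BASE; the helpers
 * below index a unit's window by saa_indx.
 */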
static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
{
	void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
		SEC_SAA_CTRL_REG(saa_indx);
	u32 regval;

	regval = readl_relaxed(addr);
	if (en)
		regval |= SEC_SAA_CTRL_GET_QM_EN;
	else
		regval &= ~SEC_SAA_CTRL_GET_QM_EN;
	writel_relaxed(regval, addr);
}

static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
			     u32 saa_int_mask)
{
	writel_relaxed(saa_int_mask,
		       info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
		       saa_indx * SEC_SAA_ADDR_SIZE);
}

static void sec_streamid(struct sec_dev_info *info, int i)
{
#define SEC_SID 0x600
#define SEC_VMID 0

	writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
		       info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
}

static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
{
	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (alloc == SEC_QUEUE_AR_FORCE_ALLOC) {
		regval |= SEC_Q_ARUSER_CFG_FA;
		regval &= ~SEC_Q_ARUSER_CFG_FNA;
	} else {
		regval &= ~SEC_Q_ARUSER_CFG_FA;
		regval |= SEC_Q_ARUSER_CFG_FNA;
	}

	writel_relaxed(regval, addr);
}

static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
{
	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	if (alloc == SEC_QUEUE_AW_FORCE_ALLOC) {
		regval |= SEC_Q_AWUSER_CFG_FA;
		regval &= ~SEC_Q_AWUSER_CFG_FNA;
	} else {
		regval &= ~SEC_Q_AWUSER_CFG_FA;
		regval |= SEC_Q_AWUSER_CFG_FNA;
	}

	writel_relaxed(regval, addr);
}

static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
{
	void __iomem *base = queue->regs;
	u32 regval;

	regval = readl_relaxed(base + SEC_Q_CFG_REG);
	if (reorder)
		regval |= SEC_Q_CFG_REORDER;
	else
		regval &= ~SEC_Q_CFG_REORDER;
	writel_relaxed(regval, base + SEC_Q_CFG_REG);
}

static void sec_queue_depth(struct sec_queue *queue, u32 depth)
{
	void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
	u32 regval;

	regval = readl_relaxed(addr);
	regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
	regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;

	writel_relaxed(regval, addr);
}

static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
{
	writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
	writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
}

static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
{
	writel_relaxed(upper_32_bits(addr),
		       queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
	writel_relaxed(lower_32_bits(addr),
		       queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
}

static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
{
	writel_relaxed(upper_32_bits(addr),
		       queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
	writel_relaxed(lower_32_bits(addr),
		       queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
}

static void sec_queue_irq_disable(struct sec_queue *queue)
{
	writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_irq_enable(struct sec_queue *queue)
{
	writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_abn_irq_disable(struct sec_queue *queue)
{
	writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
}

static void sec_queue_stop(struct sec_queue *queue)
{
	disable_irq(queue->task_irq);
	sec_queue_irq_disable(queue);
	writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
}

static void sec_queue_start(struct sec_queue *queue)
{
	sec_queue_irq_enable(queue);
	enable_irq(queue->task_irq);
	queue->expected = 0;
	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
	writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
}

static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
{
	int i;

	mutex_lock(&info->dev_lock);

	/* Get the first idle queue in SEC device */
	for (i = 0; i < SEC_Q_NUM; i++)
		if (!info->queues[i].in_use) {
			info->queues[i].in_use = true;
			info->queues_in_use++;
			mutex_unlock(&info->dev_lock);

			return &info->queues[i];
		}
	mutex_unlock(&info->dev_lock);

	return ERR_PTR(-ENODEV);
}

static int sec_queue_free(struct sec_queue *queue)
{
	struct sec_dev_info *info = queue->dev_info;

	if (queue->queue_id >= SEC_Q_NUM) {
		dev_err(info->dev, "No queue %u\n", queue->queue_id);
		return -ENODEV;
	}

	if (!queue->in_use) {
		dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
		return -ENODEV;
	}

	mutex_lock(&info->dev_lock);
	queue->in_use = false;
	info->queues_in_use--;
	mutex_unlock(&info->dev_lock);

	return 0;
}

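/*
 * Completion interrupt handling: the hard handler only masks the queue
 * interrupt and wakes the thread. The threaded handler walks the out of
 * order completion ring; each completion names the command ring element it
 * refers to, which is marked in the 'unprocessed' bitmap. Callbacks are
 * then issued strictly in submission order, starting from 'expected', for
 * as long as contiguously numbered elements are done.
 */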
static irqreturn_t sec_isr_handle_th(int irq, void *q)
{
	sec_queue_irq_disable(q);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t sec_isr_handle(int irq, void *q)
{
	struct sec_queue *queue = q;
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
	struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
	struct sec_out_bd_info *outorder_msg;
	struct sec_bd_info *msg;
	u32 ooo_read, ooo_write;
	void __iomem *base = queue->regs;
	int q_id;

	ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
	ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
	outorder_msg = cq_ring->vaddr + ooo_read;
	q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
	msg = msg_ring->vaddr + q_id;

	while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
		/*
		 * Must be before callback otherwise blocks adding other chained
		 * elements
		 */
		set_bit(q_id, queue->unprocessed);
		if (q_id == queue->expected)
			while (test_bit(queue->expected, queue->unprocessed)) {
				clear_bit(queue->expected, queue->unprocessed);
				msg = msg_ring->vaddr + queue->expected;
				msg->w0 &= ~SEC_BD_W0_DONE;
				msg_ring->callback(msg,
						queue->shadow[queue->expected]);
				queue->shadow[queue->expected] = NULL;
				queue->expected = (queue->expected + 1) %
					SEC_QUEUE_LEN;
				atomic_dec(&msg_ring->used);
			}

		ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
		writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
		ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
		outorder_msg = cq_ring->vaddr + ooo_read;
		q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
		msg = msg_ring->vaddr + q_id;
	}

	sec_queue_irq_enable(queue);

	return IRQ_HANDLED;
}

static int sec_queue_irq_init(struct sec_queue *queue)
{
	struct sec_dev_info *info = queue->dev_info;
	int irq = queue->task_irq;
	int ret;

	ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
				   IRQF_TRIGGER_RISING, queue->name, queue);
	if (ret) {
		dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
		return ret;
	}
	disable_irq(irq);

	return 0;
}

static int sec_queue_irq_uninit(struct sec_queue *queue)
{
	free_irq(queue->task_irq, queue);

	return 0;
}

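/* Return the device with the most idle queues. Caller holds sec_id_lock. */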
static struct sec_dev_info *sec_device_get(void)
{
	struct sec_dev_info *sec_dev = NULL;
	struct sec_dev_info *this_sec_dev;
	int least_busy_n = SEC_Q_NUM + 1;
	int i;

	/* Find which one is least busy and use that first */
	for (i = 0; i < SEC_MAX_DEVICES; i++) {
		this_sec_dev = sec_devices[i];
		if (this_sec_dev &&
		    this_sec_dev->queues_in_use < least_busy_n) {
			least_busy_n = this_sec_dev->queues_in_use;
			sec_dev = this_sec_dev;
		}
	}

	return sec_dev;
}

static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
{
	struct sec_queue *queue;

	queue = sec_alloc_queue(info);
	if (IS_ERR(queue)) {
		dev_err(info->dev, "alloc sec queue failed! %ld\n",
			PTR_ERR(queue));
		return queue;
	}

	sec_queue_start(queue);

	return queue;
}

/**
 * sec_queue_alloc_start_safe - get a hw queue from appropriate instance
 *
 * This function does extremely simplistic load balancing. It does not take into
 * account NUMA locality of the accelerator, or which cpu has requested the
 * queue. Future work may focus on optimizing this in order to improve full
 * machine throughput.
 */
struct sec_queue *sec_queue_alloc_start_safe(void)
{
	struct sec_dev_info *info;
	struct sec_queue *queue = ERR_PTR(-ENODEV);

	mutex_lock(&sec_id_lock);
	info = sec_device_get();
	if (!info)
		goto unlock;

	queue = sec_queue_alloc_start(info);

unlock:
	mutex_unlock(&sec_id_lock);

	return queue;
}

/**
 * sec_queue_stop_release() - free up a hw queue for reuse
 * @queue: The queue we are done with.
 *
 * This will stop the current queue, terminating any transactions
 * that are in flight, and return it to the pool of available hw queues.
 */
int sec_queue_stop_release(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;
	int ret;

	sec_queue_stop(queue);

	ret = sec_queue_free(queue);
	if (ret)
		dev_err(dev, "Releasing queue failed %d\n", ret);

	return ret;
}

/**
 * sec_queue_empty() - Is this hardware queue currently empty.
 * @queue: The queue to test
 *
 * We need to know if we have an empty queue for some of the chaining modes
 * as if it is not empty we may need to hold the message in a software queue
 * until the hw queue is drained.
 */
bool sec_queue_empty(struct sec_queue *queue)
{
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

	return !atomic_read(&msg_ring->used);
}

/**
 * sec_queue_send() - queue up a single operation in the hw queue
 * @queue: The queue in which to put the message
 * @msg: The message
 * @ctx: Context to be put in the shadow array and passed back to cb on result.
 *
 * This function will return -EAGAIN if the queue is currently full.
 */
int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
{
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
	void __iomem *base = queue->regs;
	u32 write, read;

	mutex_lock(&msg_ring->lock);
	read = readl(base + SEC_Q_RD_PTR_REG);
	write = readl(base + SEC_Q_WR_PTR_REG);
	if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
		mutex_unlock(&msg_ring->lock);
		return -EAGAIN;
	}
	memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
	queue->shadow[write] = ctx;
	write = (write + 1) % SEC_QUEUE_LEN;

	/* Ensure content updated before queue advance */
	wmb();
	writel(write, base + SEC_Q_WR_PTR_REG);

	atomic_inc(&msg_ring->used);
	mutex_unlock(&msg_ring->lock);

	return 0;
}

bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
{
	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

	return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
}

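/*
 * Bring a single queue to a known state: force no-allocate cache hints on
 * the AXI read and write channels, enable out of order completions, program
 * the three ring base addresses, and leave all queue interrupts masked
 * until sec_queue_start() is called.
 */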
static void sec_queue_hw_init(struct sec_queue *queue)
{
	sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FORCE_NOALLOC);
	sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FORCE_NOALLOC);
	sec_queue_ar_pkgattr(queue, 1);
	sec_queue_aw_pkgattr(queue, 1);

	/* Enable out of order queue */
	sec_queue_reorder(queue, true);

	/* Interrupt after a single complete element */
	writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);

	sec_queue_depth(queue, SEC_QUEUE_LEN - 1);

	sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);

	sec_queue_outorder_addr(queue, queue->ring_cq.paddr);

	sec_queue_errbase_addr(queue, queue->ring_db.paddr);

	writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);

	sec_queue_abn_irq_disable(queue);
	sec_queue_irq_disable(queue);
	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
}

static int sec_hw_init(struct sec_dev_info *info)
{
	struct iommu_domain *domain;
	u32 sec_ipv4_mask = 0;
	u32 sec_ipv6_mask[10] = {};
	u32 i;
	int ret;

	domain = iommu_get_domain_for_dev(info->dev);

	/*
	 * Enable all available processing unit clocks.
	 * Only the first cluster is usable with translations.
	 */
	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
		info->num_saas = 5;
	else
		info->num_saas = 10;

	writel_relaxed(GENMASK(info->num_saas - 1, 0),
		       info->regs[SEC_SAA] + SEC_CLK_EN_REG);

	/* 32 bit little endian */
	sec_bd_endian_little(info);

	sec_cache_config(info);

	/* Data axi port write and read outstanding config as per datasheet */
	sec_data_axiwr_otsd_cfg(info, 0x7);
	sec_data_axird_otsd_cfg(info, 0x7);

	/* Enable clock gating */
	sec_clk_gate_en(info, true);

	/* Set CNT_CYC register not read clear */
	sec_comm_cnt_cfg(info, false);

	/* Enable CNT_CYC */
	sec_commsnap_en(info, false);

	writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);

	ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
	if (ret) {
		dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
		return -EIO;
	}

	sec_ipv6_hashmask(info, sec_ipv6_mask);

	/* do not use debug bd */
	sec_set_dbg_bd_cfg(info, 0);

	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
		for (i = 0; i < SEC_Q_NUM; i++) {
			sec_streamid(info, i);
			/* Same QoS for all queues */
			writel_relaxed(0x3f,
				       info->regs[SEC_SAA] +
				       SEC_Q_WEIGHT_CFG_REG(i));
		}
	}

	for (i = 0; i < info->num_saas; i++) {
		sec_saa_getqm_en(info, i, 1);
		sec_saa_int_mask(info, i, 0);
	}

	return 0;
}

static void sec_hw_exit(struct sec_dev_info *info)
{
	int i;

	for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
		sec_saa_int_mask(info, i, (u32)~0);
		sec_saa_getqm_en(info, i, 0);
	}
}

static void sec_queue_base_init(struct sec_dev_info *info,
				struct sec_queue *queue, int queue_id)
{
	queue->dev_info = info;
	queue->queue_id = queue_id;
	snprintf(queue->name, sizeof(queue->name),
		 "%s_%d", dev_name(info->dev), queue->queue_id);
}

static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
{
	struct resource *res;
	int i;

	for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);

		if (!res) {
			dev_err(info->dev, "Memory resource %d not found\n", i);
			return -EINVAL;
		}

		info->regs[i] = devm_ioremap(info->dev, res->start,
					     resource_size(res));
		if (!info->regs[i]) {
			dev_err(info->dev,
				"Memory resource %d could not be remapped\n",
				i);
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_base_init(struct sec_dev_info *info,
			 struct platform_device *pdev)
{
	int ret;

	ret = sec_map_io(info, pdev);
	if (ret)
		return ret;

	ret = sec_clk_en(info);
	if (ret)
		return ret;

	ret = sec_reset_whole_module(info);
	if (ret)
		goto sec_clk_disable;

	ret = sec_hw_init(info);
	if (ret)
		goto sec_clk_disable;

	return 0;

sec_clk_disable:
	sec_clk_dis(info);

	return ret;
}

static void sec_base_exit(struct sec_dev_info *info)
{
	sec_hw_exit(info);
	sec_clk_dis(info);
}

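/* Each ring occupies a whole number of pages of coherent DMA memory. */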
#define SEC_Q_CMD_SIZE \
	round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
#define SEC_Q_CQ_SIZE \
	round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
#define SEC_Q_DB_SIZE \
	round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)

static int sec_queue_res_cfg(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;
	struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
	struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
	struct sec_queue_ring_db *ring_db = &queue->ring_db;
	int ret;

	ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
					     &ring_cmd->paddr, GFP_KERNEL);
	if (!ring_cmd->vaddr)
		return -ENOMEM;

	atomic_set(&ring_cmd->used, 0);
	mutex_init(&ring_cmd->lock);
	ring_cmd->callback = sec_alg_callback;

	ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
					    &ring_cq->paddr, GFP_KERNEL);
	if (!ring_cq->vaddr) {
		ret = -ENOMEM;
		goto err_free_ring_cmd;
	}

	ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
					    &ring_db->paddr, GFP_KERNEL);
	if (!ring_db->vaddr) {
		ret = -ENOMEM;
		goto err_free_ring_cq;
	}
	queue->task_irq = platform_get_irq(to_platform_device(dev),
					   queue->queue_id * 2 + 1);
	if (queue->task_irq < 0) {
		ret = queue->task_irq;
		goto err_free_ring_db;
	}

	return 0;

err_free_ring_db:
	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
			  queue->ring_db.paddr);
err_free_ring_cq:
	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
			  queue->ring_cq.paddr);
err_free_ring_cmd:
	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
			  queue->ring_cmd.paddr);

	return ret;
}

static void sec_queue_free_ring_pages(struct sec_queue *queue)
{
	struct device *dev = queue->dev_info->dev;

	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
			  queue->ring_db.paddr);
	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
			  queue->ring_cq.paddr);
	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
			  queue->ring_cmd.paddr);
}

static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
			    int queue_id)
{
	int ret;

	sec_queue_base_init(info, queue, queue_id);

	ret = sec_queue_res_cfg(queue);
	if (ret)
		return ret;

	ret = sec_queue_map_io(queue);
	if (ret) {
		dev_err(info->dev, "Queue map failed %d\n", ret);
		sec_queue_free_ring_pages(queue);
		return ret;
	}

	sec_queue_hw_init(queue);

	return 0;
}

static void sec_queue_unconfig(struct sec_dev_info *info,
			       struct sec_queue *queue)
{
	sec_queue_unmap_io(queue);
	sec_queue_free_ring_pages(queue);
}

static int sec_id_alloc(struct sec_dev_info *info)
{
	int ret = 0;
	int i;

	mutex_lock(&sec_id_lock);

	for (i = 0; i < SEC_MAX_DEVICES; i++)
		if (!sec_devices[i])
			break;
	if (i == SEC_MAX_DEVICES) {
		ret = -ENOMEM;
		goto unlock;
	}
	info->sec_id = i;
	sec_devices[info->sec_id] = info;

unlock:
	mutex_unlock(&sec_id_lock);

	return ret;
}

static void sec_id_free(struct sec_dev_info *info)
{
	mutex_lock(&sec_id_lock);
	sec_devices[info->sec_id] = NULL;
	mutex_unlock(&sec_id_lock);
}

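/*
 * Ordering matters here: the device only becomes visible to
 * sec_queue_alloc_start_safe() once sec_id_alloc() publishes it, so that is
 * done last, after the queues and algorithms are ready. sec_remove()
 * unpublishes the device first for the same reason.
 */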
static int sec_probe(struct platform_device *pdev)
{
	struct sec_dev_info *info;
	struct device *dev = &pdev->dev;
	int i, j;
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "Failed to set 64 bit dma mask %d\n", ret);
		return -ENODEV;
	}

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	mutex_init(&info->dev_lock);

	info->hw_sgl_pool = dmam_pool_create("sgl", dev,
					     sizeof(struct sec_hw_sgl), 64, 0);
	if (!info->hw_sgl_pool) {
		dev_err(dev, "Failed to create sec sgl dma pool\n");
		return -ENOMEM;
	}

	ret = sec_base_init(info, pdev);
	if (ret) {
		dev_err(dev, "Base initialization fail! %d\n", ret);
		return ret;
	}

	for (i = 0; i < SEC_Q_NUM; i++) {
		ret = sec_queue_config(info, &info->queues[i], i);
		if (ret)
			goto queues_unconfig;

		ret = sec_queue_irq_init(&info->queues[i]);
		if (ret) {
			sec_queue_unconfig(info, &info->queues[i]);
			goto queues_unconfig;
		}
	}

	ret = sec_algs_register();
	if (ret) {
		dev_err(dev, "Failed to register algorithms with crypto %d\n",
			ret);
		goto queues_unconfig;
	}

	platform_set_drvdata(pdev, info);

	ret = sec_id_alloc(info);
	if (ret)
		goto algs_unregister;

	return 0;

algs_unregister:
	sec_algs_unregister();
queues_unconfig:
	for (j = i - 1; j >= 0; j--) {
		sec_queue_irq_uninit(&info->queues[j]);
		sec_queue_unconfig(info, &info->queues[j]);
	}
	sec_base_exit(info);

	return ret;
}

static void sec_remove(struct platform_device *pdev)
{
	struct sec_dev_info *info = platform_get_drvdata(pdev);
	int i;

	/* Unexpose as soon as possible, reuse during remove is fine */
	sec_id_free(info);

	sec_algs_unregister();

	for (i = 0; i < SEC_Q_NUM; i++) {
		sec_queue_irq_uninit(&info->queues[i]);
		sec_queue_unconfig(info, &info->queues[i]);
	}

	sec_base_exit(info);
}

static const __maybe_unused struct of_device_id sec_match[] = {
	{ .compatible = "hisilicon,hip06-sec" },
	{ .compatible = "hisilicon,hip07-sec" },
	{}
};
MODULE_DEVICE_TABLE(of, sec_match);

static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
	{ "HISI02C1", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, sec_acpi_match);

static struct platform_driver sec_driver = {
	.probe = sec_probe,
	.remove = sec_remove,
	.driver = {
		.name = "hisi_sec_platform_driver",
		.of_match_table = sec_match,
		.acpi_match_table = ACPI_PTR(sec_acpi_match),
	},
};
module_platform_driver(sec_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <[email protected]>");
MODULE_AUTHOR("Jonathan Cameron <[email protected]>");