Path: blob/master/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */
#ifndef __OTX2_CPTLF_H
#define __OTX2_CPTLF_H

#include <linux/soc/marvell/octeontx2/asm.h>
#include <linux/bitfield.h>
#include <mbox.h>
#include <rvu.h>
#include "otx2_cpt_common.h"
#include "otx2_cpt_reqmgr.h"

/*
 * User-requested length of the CPT instruction and pending queues,
 * in CPT_INST_S messages
 */
#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200

/*
 * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
 * messages.
 */
#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS/40)

/*
 * CPT instruction and pending queues length in CPT_INST_S messages
 */
#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)

/*
 * LDWB is used incorrectly when IQB_LDWB = 1 and the CPT instruction
 * queue has fewer than 320 free entries. As a workaround, increase the
 * HW instruction queue size by 320 and expose 320 fewer entries to
 * SW/NIX RX.
 */
#define OTX2_CPT_INST_QLEN_EXTRA_BYTES (320 * OTX2_CPT_INST_SIZE)
#define OTX2_CPT_EXTRA_SIZE_DIV40 (320/40)

/* CPT instruction queue length in bytes */
#define OTX2_CPT_INST_QLEN_BYTES \
		((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \
		OTX2_CPT_INST_QLEN_EXTRA_BYTES)

/* CPT instruction group queue length in bytes */
#define OTX2_CPT_INST_GRP_QLEN_BYTES \
		((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)
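/*
 * Worked example for the defaults above (illustrative only, not part of
 * the driver): with OTX2_CPT_USER_REQUESTED_QLEN_MSGS = 8200,
 *
 *   OTX2_CPT_SIZE_DIV40       = 8200 / 40      = 205
 *   OTX2_CPT_INST_QLEN_MSGS   = (205 - 1) * 40 = 8160
 *   OTX2_CPT_EXTRA_SIZE_DIV40 = 320 / 40       = 8
 *
 * so the HW queue is programmed with 205 + 8 = 213 units of 40
 * instructions (see otx2_cptlf_do_set_iqueue_size() below), while
 * software fills at most 8160 entries, preserving the 320-entry
 * headroom demanded by the LDWB errata workaround.
 */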
/* CPT FC length in bytes */
#define OTX2_CPT_Q_FC_LEN 128

/* CPT instruction queue alignment */
#define OTX2_CPT_INST_Q_ALIGNMENT 128

/* Mask which selects all engine groups */
#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF

/* Maximum LFs supported in OcteonTX2 for CPT */
#define OTX2_CPT_MAX_LFS_NUM 64

/* Queue priority */
#define OTX2_CPT_QUEUE_HI_PRIO 0x1
#define OTX2_CPT_QUEUE_LOW_PRIO 0x0

enum otx2_cptlf_state {
	OTX2_CPTLF_IN_RESET,
	OTX2_CPTLF_STARTED,
};

struct otx2_cpt_inst_queue {
	u8 *vaddr;
	u8 *real_vaddr;
	dma_addr_t dma_addr;
	dma_addr_t real_dma_addr;
	u32 size;
};

struct otx2_cptlfs_info;
struct otx2_cptlf_wqe {
	struct tasklet_struct work;
	struct otx2_cptlfs_info *lfs;
	u8 lf_num;
};

struct otx2_cptlf_info {
	struct otx2_cptlfs_info *lfs;		/* Ptr to cptlfs_info struct */
	void __iomem *lmtline;			/* Address of LMTLINE */
	void __iomem *ioreg;			/* LMTLINE send register */
	int msix_offset;			/* MSI-X interrupts offset */
	cpumask_var_t affinity_mask;		/* IRQs affinity mask */
	u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32];/* Interrupts name */
	u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS];  /* Is interrupt registered */
	u8 slot;				/* Slot number of this LF */

	struct otx2_cpt_inst_queue iqueue;	/* Instruction queue */
	struct otx2_cpt_pending_queue pqueue;	/* Pending queue */
	struct otx2_cptlf_wqe *wqe;		/* Tasklet work info */
};

struct cpt_hw_ops {
	void (*send_cmd)(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			 struct otx2_cptlf_info *lf);
	u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
	u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
	struct otx2_cpt_inst_info *
	(*cpt_sg_info_create)(struct pci_dev *pdev,
			      struct otx2_cpt_req_info *req, gfp_t gfp);
};

#define LMTLINE_SIZE 128
#define LMTLINE_ALIGN 128
struct otx2_lmt_info {
	void *base;
	dma_addr_t iova;
	u32 size;
	u8 align;
};

struct otx2_cptlfs_info {
	/* Registers start address of VF/PF LFs are attached to */
	void __iomem *reg_base;
	struct otx2_lmt_info lmt_info;
	struct pci_dev *pdev;		/* Device LFs are attached to */
	struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
	struct otx2_mbox *mbox;
	struct cpt_hw_ops *ops;
	u8 are_lfs_attached;		/* Whether CPT LFs are attached */
	u8 lfs_num;			/* Number of CPT LFs */
	u8 kcrypto_se_eng_grp_num;	/* Crypto symmetric engine group number */
	u8 kcrypto_ae_eng_grp_num;	/* Crypto asymmetric engine group number */
	u8 kvf_limits;			/* Kernel crypto limits */
	atomic_t state;			/* LF's state: started/reset */
	int blkaddr;			/* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
	int global_slot;		/* Global slot across the blocks */
	u8 ctx_ilen;
	u8 ctx_ilen_ovrd;
};

static inline void otx2_cpt_free_instruction_queues(
					struct otx2_cptlfs_info *lfs)
{
	struct otx2_cpt_inst_queue *iq;
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		iq = &lfs->lf[i].iqueue;
		if (iq->real_vaddr)
			dma_free_coherent(&lfs->pdev->dev,
					  iq->size,
					  iq->real_vaddr,
					  iq->real_dma_addr);
		iq->real_vaddr = NULL;
		iq->vaddr = NULL;
	}
}

static inline int otx2_cpt_alloc_instruction_queues(
					struct otx2_cptlfs_info *lfs)
{
	struct otx2_cpt_inst_queue *iq;
	int ret = 0, i;

	if (!lfs->lfs_num)
		return -EINVAL;

	for (i = 0; i < lfs->lfs_num; i++) {
		iq = &lfs->lf[i].iqueue;
		iq->size = OTX2_CPT_INST_QLEN_BYTES +
			   OTX2_CPT_Q_FC_LEN +
			   OTX2_CPT_INST_GRP_QLEN_BYTES +
			   OTX2_CPT_INST_Q_ALIGNMENT;
		iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
					&iq->real_dma_addr, GFP_KERNEL);
		if (!iq->real_vaddr) {
			ret = -ENOMEM;
			goto error;
		}
		iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
		iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;

		/* Align pointers */
		iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
		iq->dma_addr = PTR_ALIGN(iq->dma_addr,
					 OTX2_CPT_INST_Q_ALIGNMENT);
	}
	return 0;

error:
	otx2_cpt_free_instruction_queues(lfs);
	return ret;
}
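/*
 * Resulting per-LF buffer layout (a sketch of the allocation above,
 * assuming the macro values defined in this header):
 *
 *   real_vaddr / real_dma_addr
 *   |-- group queue --|-- pad --|-- instruction queue --|-- FC --|
 *                               ^
 *                               vaddr / dma_addr, PTR_ALIGN'ed to
 *                               OTX2_CPT_INST_Q_ALIGNMENT (128 B)
 *
 * The extra OTX2_CPT_INST_Q_ALIGNMENT bytes included in iq->size
 * guarantee that aligning vaddr forward can never run past the end of
 * the DMA allocation.
 */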
static inline void otx2_cptlf_set_iqueues_base_addr(
					struct otx2_cptlfs_info *lfs)
{
	union otx2_cptx_lf_q_base lf_q_base;
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
				 OTX2_CPT_LF_Q_BASE, lf_q_base.u);
	}
}

static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
{
	union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };

	lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
				 OTX2_CPT_EXTRA_SIZE_DIV40;
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}

static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
}

#define INFLIGHT	GENMASK_ULL(8, 0)
#define GRB_CNT		GENMASK_ULL(39, 32)
#define GWB_CNT		GENMASK_ULL(47, 40)
#define XQ_XOR		GENMASK_ULL(63, 63)
#define DQPTR		GENMASK_ULL(19, 0)
#define NQPTR		GENMASK_ULL(51, 32)

static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
	void __iomem *reg_base = lf->lfs->reg_base;
	struct pci_dev *pdev = lf->lfs->pdev;
	u8 blkaddr = lf->lfs->blkaddr;
	int timeout = 1000000;
	u64 inprog, inst_ptr;
	u64 slot = lf->slot;
	u64 qsize, pending;
	int i = 0;

	/* Disable instructions enqueuing */
	otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, 0x0);

	inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
	inprog |= BIT_ULL(16);
	otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG, inprog);

	qsize = otx2_cpt_read64(reg_base, blkaddr, slot,
				OTX2_CPT_LF_Q_SIZE) & 0x7FFF;
	do {
		inst_ptr = otx2_cpt_read64(reg_base, blkaddr, slot,
					   OTX2_CPT_LF_Q_INST_PTR);
		pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
			  FIELD_GET(NQPTR, inst_ptr) -
			  FIELD_GET(DQPTR, inst_ptr);
		udelay(1);
		timeout--;
	} while ((pending != 0) && (timeout != 0));

	if (timeout == 0)
		dev_warn(&pdev->dev, "TIMEOUT: CPT poll on pending instructions\n");

	timeout = 1000000;
	/* Wait for CPT queue to become execution-quiescent */
	do {
		inprog = otx2_cpt_read64(reg_base, blkaddr, slot,
					 OTX2_CPT_LF_INPROG);

		if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
		    (FIELD_GET(GRB_CNT, inprog) == 0)) {
			i++;
		} else {
			i = 0;
			timeout--;
		}
	} while ((timeout != 0) && (i < 10));

	if (timeout == 0)
		dev_warn(&pdev->dev, "TIMEOUT: CPT poll on inflight count\n");
	/* Wait for 2 us to flush all queue writes to memory */
	udelay(2);
}
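/*
 * One way to read the pending-instruction formula above (an illustrative
 * interpretation, not taken from the hardware manual): NQPTR/DQPTR are
 * the enqueue/dequeue offsets into the queue, and XQ_XOR is set while
 * the enqueue pointer has wrapped one more time than the dequeue
 * pointer. For example, with qsize = 213 (units of 40 entries),
 * NQPTR = 5, DQPTR = 8510 and XQ_XOR = 1:
 *
 *   pending = 1 * 213 * 40 + 5 - 8510 = 8520 + 5 - 8510 = 15
 *
 * i.e. 15 instructions still await dequeue. The first loop spins with a
 * 1 us delay per iteration until this count drains to zero or the
 * timeout expires; the second loop then waits for ten consecutive reads
 * showing no inflight instructions.
 */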
static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
		otx2_cpt_lf_reset_msg(lfs, lfs->global_slot + slot);
	}
}

static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
					     bool enable)
{
	u8 blkaddr = lf->lfs->blkaddr;
	union otx2_cptx_lf_ctl lf_ctl;

	lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
				   OTX2_CPT_LF_CTL);

	/* Set iqueue's enqueuing */
	lf_ctl.s.ena = enable ? 0x1 : 0x0;
	otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
			 OTX2_CPT_LF_CTL, lf_ctl.u);
}

static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
{
	otx2_cptlf_set_iqueue_enq(lf, true);
}

static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
					      bool enable)
{
	union otx2_cptx_lf_inprog lf_inprog;
	u8 blkaddr = lf->lfs->blkaddr;

	lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
				      OTX2_CPT_LF_INPROG);

	/* Set iqueue's execution */
	lf_inprog.s.eena = enable ? 0x1 : 0x0;
	otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
			 OTX2_CPT_LF_INPROG, lf_inprog.u);
}

static inline void otx2_cptlf_set_ctx_flr_flush(struct otx2_cptlf_info *lf)
{
	u8 blkaddr = lf->lfs->blkaddr;
	u64 val;

	val = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
			      OTX2_CPT_LF_CTX_CTL);
	val |= BIT_ULL(0);

	otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
			 OTX2_CPT_LF_CTX_CTL, val);
}

static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
{
	otx2_cptlf_set_iqueue_exec(lf, true);
}

static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
{
	otx2_cptlf_set_iqueue_exec(lf, false);
}

static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		/* Enable context flush on FLR (errata workaround) */
		if (is_dev_cn10kb(lfs->pdev))
			otx2_cptlf_set_ctx_flr_flush(&lfs->lf[slot]);

		otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
		otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
	}
}

static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
				      struct otx2_cpt_iq_command *iq_cmd,
				      u64 comp_baddr)
{
	cptinst->u[0] = 0x0;
	cptinst->s.doneint = true;
	cptinst->s.res_addr = comp_baddr;
	cptinst->u[2] = 0x0;
	cptinst->u[3] = 0x0;
	cptinst->s.ei0 = iq_cmd->cmd.u;
	cptinst->s.ei1 = iq_cmd->dptr;
	cptinst->s.ei2 = iq_cmd->rptr;
	cptinst->s.ei3 = iq_cmd->cptr.u;
}

/*
 * On the OcteonTX2 platform the insts_num parameter is the count of
 * instructions to be enqueued. The valid values for insts_num are:
 * 1 - 1 CPT instruction will be enqueued during the LMTST operation
 * 2 - 2 CPT instructions will be enqueued during the LMTST operation
 */
static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
				     u32 insts_num, struct otx2_cptlf_info *lf)
{
	void __iomem *lmtline = lf->lmtline;
	long ret;

	/*
	 * Make sure the memory areas pointed to by CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);

		/*
		 * LDEOR initiates an atomic transfer to the I/O device.
		 * The following will cause the LMTST to fail (the LDEOR
		 * returns zero):
		 * - No stores have been performed to the LMTLINE since it
		 *   was last invalidated.
		 * - The bytes which have been stored to the LMTLINE since
		 *   it was last invalidated form a pattern that is
		 *   non-contiguous, does not start at byte 0, or does not
		 *   end on an 8-byte boundary (i.e. comprises a formation
		 *   of other than 1-16 8-byte words).
		 *
		 * These rules are designed such that an operating system
		 * context switch or hypervisor guest switch need have no
		 * knowledge of the LMTST operations; the switch code does
		 * not need to store to LMTCANCEL. Also note that, as the
		 * LMTLINE data cannot be read, there is no information
		 * leakage between processes.
		 */
		ret = otx2_lmt_flush(lf->ioreg);

	} while (!ret);
}
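/*
 * Minimal submission sketch (illustrative only): iq_cmd and the
 * completion-buffer DMA address res_daddr are hypothetical names for
 * state the caller prepares, as done in otx2_cpt_reqmgr:
 *
 *	union otx2_cpt_inst_s inst;
 *
 *	otx2_cpt_fill_inst(&inst, &iq_cmd, res_daddr);
 *	lf->lfs->ops->send_cmd(&inst, 1, lf);
 *
 * On OcteonTX2, ops->send_cmd is expected to point at
 * otx2_cpt_send_cmd() above, which retries the LMTST until
 * otx2_lmt_flush() reports a successful LDEOR.
 */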
static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
{
	return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
}

static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
					   struct pci_dev *pdev,
					   void __iomem *reg_base,
					   struct otx2_mbox *mbox,
					   int blkaddr)
{
	lfs->pdev = pdev;
	lfs->reg_base = reg_base;
	lfs->mbox = mbox;
	lfs->blkaddr = blkaddr;
}

int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
		    int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);

#endif /* __OTX2_CPTLF_H */
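/*
 * Typical bring-up order for the APIs declared above (an illustrative
 * sketch; the authoritative sequence lives in the PF/VF drivers that
 * include this header):
 *
 *	otx2_cptlf_set_dev_info(lfs, pdev, reg_base, mbox, blkaddr);
 *	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
 *			      OTX2_CPT_QUEUE_HI_PRIO, lfs_num);
 *	ret = otx2_cptlf_register_misc_interrupts(lfs);
 *	ret = otx2_cptlf_register_done_interrupts(lfs);
 *	ret = otx2_cptlf_set_irqs_affinity(lfs);
 *
 * Teardown reverses this: free the IRQ affinity, unregister the done
 * and misc interrupts, then call otx2_cptlf_shutdown(lfs).
 */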