Path: blob/master/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
48893 views
// SPDX-License-Identifier: GPL-2.0-only1/* Copyright (C) 2020 Marvell. */23#include <linux/ctype.h>4#include <linux/firmware.h>5#include <linux/string.h>6#include <linux/string_choices.h>7#include "otx2_cptpf_ucode.h"8#include "otx2_cpt_common.h"9#include "otx2_cptpf.h"10#include "otx2_cptlf.h"11#include "otx2_cpt_reqmgr.h"12#include "rvu_reg.h"1314#define CSR_DELAY 301516#define LOADFVC_RLEN 817#define LOADFVC_MAJOR_OP 0x0118#define LOADFVC_MINOR_OP 0x081920/*21* Interval to flush dirty data for next CTX entry. The interval is measured22* in increments of 10ns(interval time = CTX_FLUSH_TIMER_COUNT * 10ns).23*/24#define CTX_FLUSH_TIMER_CNT 0x2FAF02526struct fw_info_t {27struct list_head ucodes;28};2930static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,31struct otx2_cpt_eng_grp_info *eng_grp)32{33struct otx2_cpt_bitmap bmap = { {0} };34bool found = false;35int i;3637if (eng_grp->g->engs_num < 0 ||38eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {39dev_err(dev, "unsupported number of engines %d on octeontx2\n",40eng_grp->g->engs_num);41return bmap;42}4344for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {45if (eng_grp->engs[i].type) {46bitmap_or(bmap.bits, bmap.bits,47eng_grp->engs[i].bmap,48eng_grp->g->engs_num);49bmap.size = eng_grp->g->engs_num;50found = true;51}52}5354if (!found)55dev_err(dev, "No engines reserved for engine group %d\n",56eng_grp->idx);57return bmap;58}5960static int is_eng_type(int val, int eng_type)61{62return val & (1 << eng_type);63}6465static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)66{67if (eng_grp->ucode[1].type)68return true;69else70return false;71}7273static void set_ucode_filename(struct otx2_cpt_ucode *ucode,74const char *filename)75{76strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);77}7879static char *get_eng_type_str(int eng_type)80{81char *str = "unknown";8283switch (eng_type) {84case OTX2_CPT_SE_TYPES:85str = "SE";86break;8788case OTX2_CPT_IE_TYPES:89str = "IE";90break;9192case 
OTX2_CPT_AE_TYPES:93str = "AE";94break;95}96return str;97}9899static char *get_ucode_type_str(int ucode_type)100{101char *str = "unknown";102103switch (ucode_type) {104case (1 << OTX2_CPT_SE_TYPES):105str = "SE";106break;107108case (1 << OTX2_CPT_IE_TYPES):109str = "IE";110break;111112case (1 << OTX2_CPT_AE_TYPES):113str = "AE";114break;115116case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):117str = "SE+IPSEC";118break;119}120return str;121}122123static int get_ucode_type(struct device *dev,124struct otx2_cpt_ucode_hdr *ucode_hdr,125int *ucode_type, u16 rid)126{127char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];128char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];129int i, val = 0;130u8 nn;131132strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);133for (i = 0; i < strlen(tmp_ver_str); i++)134tmp_ver_str[i] = tolower(tmp_ver_str[i]);135136sprintf(ver_str_prefix, "ocpt-%02d", rid);137if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))138return -EINVAL;139140nn = ucode_hdr->ver_num.nn;141if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&142(nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||143nn == OTX2_CPT_SE_UC_TYPE3))144val |= 1 << OTX2_CPT_SE_TYPES;145if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&146(nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||147nn == OTX2_CPT_IE_UC_TYPE3))148val |= 1 << OTX2_CPT_IE_TYPES;149if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&150nn == OTX2_CPT_AE_UC_TYPE)151val |= 1 << OTX2_CPT_AE_TYPES;152153*ucode_type = val;154155if (!val)156return -EINVAL;157158return 0;159}160161static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,162dma_addr_t dma_addr, int blkaddr)163{164return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,165CPT_AF_EXEX_UCODE_BASE(eng),166(u64)dma_addr, blkaddr);167}168169static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,170struct otx2_cptpf_dev *cptpf, int blkaddr)171{172struct 
otx2_cpt_engs_rsvd *engs;173dma_addr_t dma_addr;174int i, bit, ret;175176/* Set PF number for microcode fetches */177ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,178CPT_AF_PF_FUNC,179rvu_make_pcifunc(cptpf->pdev,180cptpf->pf_id, 0),181blkaddr);182if (ret)183return ret;184185for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {186engs = &eng_grp->engs[i];187if (!engs->type)188continue;189190dma_addr = engs->ucode->dma;191192/*193* Set UCODE_BASE only for the cores which are not used,194* other cores should have already valid UCODE_BASE set195*/196for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)197if (!eng_grp->g->eng_ref_cnt[bit]) {198ret = __write_ucode_base(cptpf, bit, dma_addr,199blkaddr);200if (ret)201return ret;202}203}204return 0;205}206207static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)208{209struct otx2_cptpf_dev *cptpf = obj;210int ret;211212if (cptpf->has_cpt1) {213ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);214if (ret)215return ret;216}217return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);218}219220static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,221struct otx2_cptpf_dev *cptpf,222struct otx2_cpt_bitmap bmap,223int blkaddr)224{225int i, timeout = 10;226int busy, ret;227u64 reg = 0;228229/* Detach the cores from group */230for_each_set_bit(i, bmap.bits, bmap.size) {231ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,232CPT_AF_EXEX_CTL2(i), ®, blkaddr);233if (ret)234return ret;235236if (reg & (1ull << eng_grp->idx)) {237eng_grp->g->eng_ref_cnt[i]--;238reg &= ~(1ull << eng_grp->idx);239240ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,241cptpf->pdev,242CPT_AF_EXEX_CTL2(i), reg,243blkaddr);244if (ret)245return ret;246}247}248249/* Wait for cores to become idle */250do {251busy = 0;252usleep_range(10000, 20000);253if (timeout-- < 0)254return -EBUSY;255256for_each_set_bit(i, bmap.bits, bmap.size) {257ret = 
otx2_cpt_read_af_reg(&cptpf->afpf_mbox,258cptpf->pdev,259CPT_AF_EXEX_STS(i), ®,260blkaddr);261if (ret)262return ret;263264if (reg & 0x1) {265busy = 1;266break;267}268}269} while (busy);270271/* Disable the cores only if they are not used anymore */272for_each_set_bit(i, bmap.bits, bmap.size) {273if (!eng_grp->g->eng_ref_cnt[i]) {274ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,275cptpf->pdev,276CPT_AF_EXEX_CTL(i), 0x0,277blkaddr);278if (ret)279return ret;280}281}282283return 0;284}285286static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,287void *obj)288{289struct otx2_cptpf_dev *cptpf = obj;290struct otx2_cpt_bitmap bmap;291int ret;292293bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);294if (!bmap.size)295return -EINVAL;296297if (cptpf->has_cpt1) {298ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,299BLKADDR_CPT1);300if (ret)301return ret;302}303return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,304BLKADDR_CPT0);305}306307static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,308struct otx2_cptpf_dev *cptpf,309struct otx2_cpt_bitmap bmap,310int blkaddr)311{312u64 reg = 0;313int i, ret;314315/* Attach the cores to the group */316for_each_set_bit(i, bmap.bits, bmap.size) {317ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,318CPT_AF_EXEX_CTL2(i), ®, blkaddr);319if (ret)320return ret;321322if (!(reg & (1ull << eng_grp->idx))) {323eng_grp->g->eng_ref_cnt[i]++;324reg |= 1ull << eng_grp->idx;325326ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,327cptpf->pdev,328CPT_AF_EXEX_CTL2(i), reg,329blkaddr);330if (ret)331return ret;332}333}334335/* Enable the cores */336for_each_set_bit(i, bmap.bits, bmap.size) {337ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,338CPT_AF_EXEX_CTL(i), 0x1,339blkaddr);340if (ret)341return ret;342}343return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);344}345346static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info 
*eng_grp,347void *obj)348{349struct otx2_cptpf_dev *cptpf = obj;350struct otx2_cpt_bitmap bmap;351int ret;352353bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);354if (!bmap.size)355return -EINVAL;356357if (cptpf->has_cpt1) {358ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,359BLKADDR_CPT1);360if (ret)361return ret;362}363return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);364}365366static int load_fw(struct device *dev, struct fw_info_t *fw_info,367char *filename, u16 rid)368{369struct otx2_cpt_ucode_hdr *ucode_hdr;370struct otx2_cpt_uc_info_t *uc_info;371int ucode_type, ucode_size;372int ret;373374uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);375if (!uc_info)376return -ENOMEM;377378ret = request_firmware(&uc_info->fw, filename, dev);379if (ret)380goto free_uc_info;381382ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;383ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);384if (ret)385goto release_fw;386387ucode_size = ntohl(ucode_hdr->code_length) * 2;388if (!ucode_size) {389dev_err(dev, "Ucode %s invalid size\n", filename);390ret = -EINVAL;391goto release_fw;392}393394set_ucode_filename(&uc_info->ucode, filename);395memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,396OTX2_CPT_UCODE_VER_STR_SZ);397uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;398uc_info->ucode.ver_num = ucode_hdr->ver_num;399uc_info->ucode.type = ucode_type;400uc_info->ucode.size = ucode_size;401list_add_tail(&uc_info->list, &fw_info->ucodes);402403return 0;404405release_fw:406release_firmware(uc_info->fw);407free_uc_info:408kfree(uc_info);409return ret;410}411412static void cpt_ucode_release_fw(struct fw_info_t *fw_info)413{414struct otx2_cpt_uc_info_t *curr, *temp;415416if (!fw_info)417return;418419list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {420list_del(&curr->list);421release_firmware(curr->fw);422kfree(curr);423}424}425426static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,427int 
ucode_type)428{429struct otx2_cpt_uc_info_t *curr;430431list_for_each_entry(curr, &fw_info->ucodes, list) {432if (!is_eng_type(curr->ucode.type, ucode_type))433continue;434435return curr;436}437return NULL;438}439440static void print_uc_info(struct fw_info_t *fw_info)441{442struct otx2_cpt_uc_info_t *curr;443444list_for_each_entry(curr, &fw_info->ucodes, list) {445pr_debug("Ucode filename %s\n", curr->ucode.filename);446pr_debug("Ucode version string %s\n", curr->ucode.ver_str);447pr_debug("Ucode version %d.%d.%d.%d\n",448curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,449curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);450pr_debug("Ucode type (%d) %s\n", curr->ucode.type,451get_ucode_type_str(curr->ucode.type));452pr_debug("Ucode size %d\n", curr->ucode.size);453pr_debug("Ucode ptr %p\n", curr->fw->data);454}455}456457static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,458u16 rid)459{460char filename[OTX2_CPT_NAME_LENGTH];461char eng_type[8];462int ret, e, i;463464INIT_LIST_HEAD(&fw_info->ucodes);465466for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {467strscpy(eng_type, get_eng_type_str(e));468for (i = 0; i < strlen(eng_type); i++)469eng_type[i] = tolower(eng_type[i]);470471snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",472rid, eng_type);473/* Request firmware for each engine type */474ret = load_fw(&pdev->dev, fw_info, filename, rid);475if (ret)476goto release_fw;477}478print_uc_info(fw_info);479return 0;480481release_fw:482cpt_ucode_release_fw(fw_info);483return ret;484}485486struct otx2_cpt_engs_rsvd *find_engines_by_type(487struct otx2_cpt_eng_grp_info *eng_grp,488int eng_type)489{490int i;491492for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {493if (!eng_grp->engs[i].type)494continue;495496if (eng_grp->engs[i].type == eng_type)497return &eng_grp->engs[i];498}499return NULL;500}501502static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,503int eng_type)504{505struct otx2_cpt_engs_rsvd *engs;506507engs = 
find_engines_by_type(eng_grp, eng_type);508509return (engs != NULL ? 1 : 0);510}511512static int update_engines_avail_count(struct device *dev,513struct otx2_cpt_engs_available *avail,514struct otx2_cpt_engs_rsvd *engs, int val)515{516switch (engs->type) {517case OTX2_CPT_SE_TYPES:518avail->se_cnt += val;519break;520521case OTX2_CPT_IE_TYPES:522avail->ie_cnt += val;523break;524525case OTX2_CPT_AE_TYPES:526avail->ae_cnt += val;527break;528529default:530dev_err(dev, "Invalid engine type %d\n", engs->type);531return -EINVAL;532}533return 0;534}535536static int update_engines_offset(struct device *dev,537struct otx2_cpt_engs_available *avail,538struct otx2_cpt_engs_rsvd *engs)539{540switch (engs->type) {541case OTX2_CPT_SE_TYPES:542engs->offset = 0;543break;544545case OTX2_CPT_IE_TYPES:546engs->offset = avail->max_se_cnt;547break;548549case OTX2_CPT_AE_TYPES:550engs->offset = avail->max_se_cnt + avail->max_ie_cnt;551break;552553default:554dev_err(dev, "Invalid engine type %d\n", engs->type);555return -EINVAL;556}557return 0;558}559560static int release_engines(struct device *dev,561struct otx2_cpt_eng_grp_info *grp)562{563int i, ret = 0;564565for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {566if (!grp->engs[i].type)567continue;568569if (grp->engs[i].count > 0) {570ret = update_engines_avail_count(dev, &grp->g->avail,571&grp->engs[i],572grp->engs[i].count);573if (ret)574return ret;575}576577grp->engs[i].type = 0;578grp->engs[i].count = 0;579grp->engs[i].offset = 0;580grp->engs[i].ucode = NULL;581bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);582}583return 0;584}585586static int do_reserve_engines(struct device *dev,587struct otx2_cpt_eng_grp_info *grp,588struct otx2_cpt_engines *req_engs)589{590struct otx2_cpt_engs_rsvd *engs = NULL;591int i, ret;592593for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {594if (!grp->engs[i].type) {595engs = &grp->engs[i];596break;597}598}599600if (!engs)601return -ENOMEM;602603engs->type = req_engs->type;604engs->count = 
req_engs->count;605606ret = update_engines_offset(dev, &grp->g->avail, engs);607if (ret)608return ret;609610if (engs->count > 0) {611ret = update_engines_avail_count(dev, &grp->g->avail, engs,612-engs->count);613if (ret)614return ret;615}616617return 0;618}619620static int check_engines_availability(struct device *dev,621struct otx2_cpt_eng_grp_info *grp,622struct otx2_cpt_engines *req_eng)623{624int avail_cnt = 0;625626switch (req_eng->type) {627case OTX2_CPT_SE_TYPES:628avail_cnt = grp->g->avail.se_cnt;629break;630631case OTX2_CPT_IE_TYPES:632avail_cnt = grp->g->avail.ie_cnt;633break;634635case OTX2_CPT_AE_TYPES:636avail_cnt = grp->g->avail.ae_cnt;637break;638639default:640dev_err(dev, "Invalid engine type %d\n", req_eng->type);641return -EINVAL;642}643644if (avail_cnt < req_eng->count) {645dev_err(dev,646"Error available %s engines %d < than requested %d\n",647get_eng_type_str(req_eng->type),648avail_cnt, req_eng->count);649return -EBUSY;650}651return 0;652}653654static int reserve_engines(struct device *dev,655struct otx2_cpt_eng_grp_info *grp,656struct otx2_cpt_engines *req_engs, int ucodes_cnt)657{658int i, ret = 0;659660/* Validate if a number of requested engines are available */661for (i = 0; i < ucodes_cnt; i++) {662ret = check_engines_availability(dev, grp, &req_engs[i]);663if (ret)664return ret;665}666667/* Reserve requested engines for this engine group */668for (i = 0; i < ucodes_cnt; i++) {669ret = do_reserve_engines(dev, grp, &req_engs[i]);670if (ret)671return ret;672}673return 0;674}675676static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)677{678if (ucode->va) {679dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,680ucode->dma);681ucode->va = NULL;682ucode->dma = 0;683ucode->size = 0;684}685686memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);687memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));688set_ucode_filename(ucode, "");689ucode->type = 0;690}691692static int copy_ucode_to_dma_mem(struct device 
*dev,693struct otx2_cpt_ucode *ucode,694const u8 *ucode_data)695{696u32 i;697698/* Allocate DMAable space */699ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,700GFP_KERNEL);701if (!ucode->va)702return -ENOMEM;703704memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),705ucode->size);706707/* Byte swap 64-bit */708for (i = 0; i < (ucode->size / 8); i++)709cpu_to_be64s(&((u64 *)ucode->va)[i]);710/* Ucode needs 16-bit swap */711for (i = 0; i < (ucode->size / 2); i++)712cpu_to_be16s(&((u16 *)ucode->va)[i]);713return 0;714}715716static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,717void *obj)718{719int ret;720721/* Point microcode to each core of the group */722ret = cpt_set_ucode_base(eng_grp, obj);723if (ret)724return ret;725726/* Attach the cores to the group and enable them */727ret = cpt_attach_and_enable_cores(eng_grp, obj);728729return ret;730}731732static int disable_eng_grp(struct device *dev,733struct otx2_cpt_eng_grp_info *eng_grp,734void *obj)735{736int i, ret;737738/* Disable all engines used by this group */739ret = cpt_detach_and_disable_cores(eng_grp, obj);740if (ret)741return ret;742743/* Unload ucode used by this engine group */744ucode_unload(dev, &eng_grp->ucode[0]);745ucode_unload(dev, &eng_grp->ucode[1]);746747for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {748if (!eng_grp->engs[i].type)749continue;750751eng_grp->engs[i].ucode = &eng_grp->ucode[0];752}753754/* Clear UCODE_BASE register for each engine used by this group */755ret = cpt_set_ucode_base(eng_grp, obj);756757return ret;758}759760static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,761struct otx2_cpt_eng_grp_info *src_grp)762{763/* Setup fields for engine group which is mirrored */764src_grp->mirror.is_ena = false;765src_grp->mirror.idx = 0;766src_grp->mirror.ref_count++;767768/* Setup fields for mirroring engine group */769dst_grp->mirror.is_ena = true;770dst_grp->mirror.idx = src_grp->idx;771dst_grp->mirror.ref_count = 
0;772}773774static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)775{776struct otx2_cpt_eng_grp_info *src_grp;777778if (!dst_grp->mirror.is_ena)779return;780781src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];782783src_grp->mirror.ref_count--;784dst_grp->mirror.is_ena = false;785dst_grp->mirror.idx = 0;786dst_grp->mirror.ref_count = 0;787}788789static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,790struct otx2_cpt_engines *engs, int engs_cnt)791{792struct otx2_cpt_engs_rsvd *mirrored_engs;793int i;794795for (i = 0; i < engs_cnt; i++) {796mirrored_engs = find_engines_by_type(mirror_eng_grp,797engs[i].type);798if (!mirrored_engs)799continue;800801/*802* If mirrored group has this type of engines attached then803* there are 3 scenarios possible:804* 1) mirrored_engs.count == engs[i].count then all engines805* from mirrored engine group will be shared with this engine806* group807* 2) mirrored_engs.count > engs[i].count then only a subset of808* engines from mirrored engine group will be shared with this809* engine group810* 3) mirrored_engs.count < engs[i].count then all engines811* from mirrored engine group will be shared with this group812* and additional engines will be reserved for exclusively use813* by this engine group814*/815engs[i].count -= mirrored_engs->count;816}817}818819static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(820struct otx2_cpt_eng_grp_info *grp)821{822struct otx2_cpt_eng_grps *eng_grps = grp->g;823int i;824825for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {826if (!eng_grps->grp[i].is_enabled)827continue;828if (eng_grps->grp[i].ucode[0].type &&829eng_grps->grp[i].ucode[1].type)830continue;831if (grp->idx == i)832continue;833if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,834grp->ucode[0].ver_str,835OTX2_CPT_UCODE_VER_STR_SZ))836return &eng_grps->grp[i];837}838839return NULL;840}841842static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(843struct otx2_cpt_eng_grps 
*eng_grps)844{845int i;846847for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {848if (!eng_grps->grp[i].is_enabled)849return &eng_grps->grp[i];850}851return NULL;852}853854static int eng_grp_update_masks(struct device *dev,855struct otx2_cpt_eng_grp_info *eng_grp)856{857struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;858struct otx2_cpt_bitmap tmp_bmap = { {0} };859int i, j, cnt, max_cnt;860int bit;861862for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {863engs = &eng_grp->engs[i];864if (!engs->type)865continue;866if (engs->count <= 0)867continue;868869switch (engs->type) {870case OTX2_CPT_SE_TYPES:871max_cnt = eng_grp->g->avail.max_se_cnt;872break;873874case OTX2_CPT_IE_TYPES:875max_cnt = eng_grp->g->avail.max_ie_cnt;876break;877878case OTX2_CPT_AE_TYPES:879max_cnt = eng_grp->g->avail.max_ae_cnt;880break;881882default:883dev_err(dev, "Invalid engine type %d\n", engs->type);884return -EINVAL;885}886887cnt = engs->count;888WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);889bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);890for (j = engs->offset; j < engs->offset + max_cnt; j++) {891if (!eng_grp->g->eng_ref_cnt[j]) {892bitmap_set(tmp_bmap.bits, j, 1);893cnt--;894if (!cnt)895break;896}897}898899if (cnt)900return -ENOSPC;901902bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);903}904905if (!eng_grp->mirror.is_ena)906return 0;907908for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {909engs = &eng_grp->engs[i];910if (!engs->type)911continue;912913mirrored_engs = find_engines_by_type(914&eng_grp->g->grp[eng_grp->mirror.idx],915engs->type);916WARN_ON(!mirrored_engs && engs->count <= 0);917if (!mirrored_engs)918continue;919920bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,921eng_grp->g->engs_num);922if (engs->count < 0) {923bit = find_first_bit(mirrored_engs->bmap,924eng_grp->g->engs_num);925bitmap_clear(tmp_bmap.bits, bit, -engs->count);926}927bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,928eng_grp->g->engs_num);929}930return 0;931}932933static int 
delete_engine_group(struct device *dev,934struct otx2_cpt_eng_grp_info *eng_grp)935{936int ret;937938if (!eng_grp->is_enabled)939return 0;940941if (eng_grp->mirror.ref_count)942return -EINVAL;943944/* Removing engine group mirroring if enabled */945remove_eng_grp_mirroring(eng_grp);946947/* Disable engine group */948ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);949if (ret)950return ret;951952/* Release all engines held by this engine group */953ret = release_engines(dev, eng_grp);954if (ret)955return ret;956957eng_grp->is_enabled = false;958959return 0;960}961962static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)963{964struct otx2_cpt_ucode *ucode;965966if (eng_grp->mirror.is_ena)967ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];968else969ucode = &eng_grp->ucode[0];970WARN_ON(!eng_grp->engs[0].type);971eng_grp->engs[0].ucode = ucode;972973if (eng_grp->engs[1].type) {974if (is_2nd_ucode_used(eng_grp))975eng_grp->engs[1].ucode = &eng_grp->ucode[1];976else977eng_grp->engs[1].ucode = ucode;978}979}980981static int create_engine_group(struct device *dev,982struct otx2_cpt_eng_grps *eng_grps,983struct otx2_cpt_engines *engs, int ucodes_cnt,984void *ucode_data[], int is_print)985{986struct otx2_cpt_eng_grp_info *mirrored_eng_grp;987struct otx2_cpt_eng_grp_info *eng_grp;988struct otx2_cpt_uc_info_t *uc_info;989int i, ret = 0;990991/* Find engine group which is not used */992eng_grp = find_unused_eng_grp(eng_grps);993if (!eng_grp) {994dev_err(dev, "Error all engine groups are being used\n");995return -ENOSPC;996}997/* Load ucode */998for (i = 0; i < ucodes_cnt; i++) {999uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];1000eng_grp->ucode[i] = uc_info->ucode;1001ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],1002uc_info->fw->data);1003if (ret)1004goto unload_ucode;1005}10061007/* Check if this group mirrors another existing engine group */1008mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);1009if (mirrored_eng_grp) {1010/* Setup 
mirroring */1011setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);10121013/*1014* Update count of requested engines because some1015* of them might be shared with mirrored group1016*/1017update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);1018}1019ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);1020if (ret)1021goto unload_ucode;10221023/* Update ucode pointers used by engines */1024update_ucode_ptrs(eng_grp);10251026/* Update engine masks used by this group */1027ret = eng_grp_update_masks(dev, eng_grp);1028if (ret)1029goto release_engs;10301031/* Enable engine group */1032ret = enable_eng_grp(eng_grp, eng_grps->obj);1033if (ret)1034goto release_engs;10351036/*1037* If this engine group mirrors another engine group1038* then we need to unload ucode as we will use ucode1039* from mirrored engine group1040*/1041if (eng_grp->mirror.is_ena)1042ucode_unload(dev, &eng_grp->ucode[0]);10431044eng_grp->is_enabled = true;10451046if (!is_print)1047return 0;10481049if (mirrored_eng_grp)1050dev_info(dev,1051"Engine_group%d: reuse microcode %s from group %d\n",1052eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,1053mirrored_eng_grp->idx);1054else1055dev_info(dev, "Engine_group%d: microcode loaded %s\n",1056eng_grp->idx, eng_grp->ucode[0].ver_str);1057if (is_2nd_ucode_used(eng_grp))1058dev_info(dev, "Engine_group%d: microcode loaded %s\n",1059eng_grp->idx, eng_grp->ucode[1].ver_str);10601061return 0;10621063release_engs:1064release_engines(dev, eng_grp);1065unload_ucode:1066ucode_unload(dev, &eng_grp->ucode[0]);1067ucode_unload(dev, &eng_grp->ucode[1]);1068return ret;1069}10701071static void delete_engine_grps(struct pci_dev *pdev,1072struct otx2_cpt_eng_grps *eng_grps)1073{1074int i;10751076/* First delete all mirroring engine groups */1077for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)1078if (eng_grps->grp[i].mirror.is_ena)1079delete_engine_group(&pdev->dev, &eng_grps->grp[i]);10801081/* Delete remaining engine groups */1082for (i = 0; i < 
OTX2_CPT_MAX_ENGINE_GROUPS; i++)1083delete_engine_group(&pdev->dev, &eng_grps->grp[i]);1084}10851086#define PCI_DEVID_CN10K_RNM 0xA0981087#define RNM_ENTROPY_STATUS 0x810881089static void rnm_to_cpt_errata_fixup(struct device *dev)1090{1091struct pci_dev *pdev;1092void __iomem *base;1093int timeout = 5000;10941095pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);1096if (!pdev)1097return;10981099base = pci_ioremap_bar(pdev, 0);1100if (!base)1101goto put_pdev;11021103while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {1104cpu_relax();1105udelay(1);1106timeout--;1107if (!timeout) {1108dev_warn(dev, "RNM is not producing entropy\n");1109break;1110}1111}11121113iounmap(base);11141115put_pdev:1116pci_dev_put(pdev);1117}11181119int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)1120{11211122int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;1123struct otx2_cpt_eng_grp_info *grp;1124int i;11251126for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {1127grp = &eng_grps->grp[i];1128if (!grp->is_enabled)1129continue;11301131if (eng_type == OTX2_CPT_SE_TYPES) {1132if (eng_grp_has_eng_type(grp, eng_type) &&1133!eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {1134eng_grp_num = i;1135break;1136}1137} else {1138if (eng_grp_has_eng_type(grp, eng_type)) {1139eng_grp_num = i;1140break;1141}1142}1143}1144return eng_grp_num;1145}11461147int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,1148struct otx2_cpt_eng_grps *eng_grps)1149{1150struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };1151struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };1152struct pci_dev *pdev = cptpf->pdev;1153struct fw_info_t fw_info;1154u64 reg_val;1155int ret = 0;11561157mutex_lock(&eng_grps->lock);1158/*1159* We don't create engine groups if it was already1160* made (when user enabled VFs for the first time)1161*/1162if (eng_grps->is_grps_created)1163goto unlock;11641165ret = cpt_ucode_load_fw(pdev, &fw_info, 
eng_grps->rid);1166if (ret)1167goto unlock;11681169/*1170* Create engine group with SE engines for kernel1171* crypto functionality (symmetric crypto)1172*/1173uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);1174if (uc_info[0] == NULL) {1175dev_err(&pdev->dev, "Unable to find firmware for SE\n");1176ret = -EINVAL;1177goto release_fw;1178}1179engs[0].type = OTX2_CPT_SE_TYPES;1180engs[0].count = eng_grps->avail.max_se_cnt;11811182ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,1183(void **) uc_info, 1);1184if (ret)1185goto release_fw;11861187/*1188* Create engine group with SE+IE engines for IPSec.1189* All SE engines will be shared with engine group 0.1190*/1191uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);1192uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);11931194if (uc_info[1] == NULL) {1195dev_err(&pdev->dev, "Unable to find firmware for IE");1196ret = -EINVAL;1197goto delete_eng_grp;1198}1199engs[0].type = OTX2_CPT_SE_TYPES;1200engs[0].count = eng_grps->avail.max_se_cnt;1201engs[1].type = OTX2_CPT_IE_TYPES;1202engs[1].count = eng_grps->avail.max_ie_cnt;12031204ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,1205(void **) uc_info, 1);1206if (ret)1207goto delete_eng_grp;12081209/*1210* Create engine group with AE engines for asymmetric1211* crypto functionality.1212*/1213uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);1214if (uc_info[0] == NULL) {1215dev_err(&pdev->dev, "Unable to find firmware for AE");1216ret = -EINVAL;1217goto delete_eng_grp;1218}1219engs[0].type = OTX2_CPT_AE_TYPES;1220engs[0].count = eng_grps->avail.max_ae_cnt;12211222ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,1223(void **) uc_info, 1);1224if (ret)1225goto delete_eng_grp;12261227eng_grps->is_grps_created = true;12281229cpt_ucode_release_fw(&fw_info);12301231if (is_dev_otx2(pdev))1232goto unlock;12331234/*1235* Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing1236* CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW 
errata.1237*/1238rnm_to_cpt_errata_fixup(&pdev->dev);12391240otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, ®_val,1241BLKADDR_CPT0);1242/*1243* Configure engine group mask to allow context prefetching1244* for the groups and enable random number request, to enable1245* CPT to request random numbers from RNM.1246*/1247reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);1248otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,1249reg_val, BLKADDR_CPT0);1250/*1251* Set interval to periodically flush dirty data for the next1252* CTX cache entry. Set the interval count to maximum supported1253* value.1254*/1255otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,1256CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);12571258/*1259* Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when1260* CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM1261* encounters a fault/poison, a rare case may result in1262* unpredictable data being delivered to a CPT engine.1263*/1264if (cpt_is_errata_38550_exists(pdev)) {1265otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,1266®_val, BLKADDR_CPT0);1267otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,1268reg_val | BIT_ULL(24), BLKADDR_CPT0);1269}12701271mutex_unlock(&eng_grps->lock);1272return 0;12731274delete_eng_grp:1275delete_engine_grps(pdev, eng_grps);1276release_fw:1277cpt_ucode_release_fw(&fw_info);1278unlock:1279mutex_unlock(&eng_grps->lock);1280return ret;1281}12821283static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,1284int blkaddr)1285{1286int timeout = 10, ret;1287int i, busy;1288u64 reg;12891290/* Disengage the cores from groups */1291for (i = 0; i < total_cores; i++) {1292ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,1293CPT_AF_EXEX_CTL2(i), 0x0,1294blkaddr);1295if (ret)1296return ret;12971298cptpf->eng_grps.eng_ref_cnt[i] = 0;1299}1300ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);1301if (ret)1302return 
ret;13031304/* Wait for cores to become idle */1305do {1306busy = 0;1307usleep_range(10000, 20000);1308if (timeout-- < 0)1309return -EBUSY;13101311for (i = 0; i < total_cores; i++) {1312ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,1313cptpf->pdev,1314CPT_AF_EXEX_STS(i), ®,1315blkaddr);1316if (ret)1317return ret;13181319if (reg & 0x1) {1320busy = 1;1321break;1322}1323}1324} while (busy);13251326/* Disable the cores */1327for (i = 0; i < total_cores; i++) {1328ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,1329CPT_AF_EXEX_CTL(i), 0x0,1330blkaddr);1331if (ret)1332return ret;1333}1334return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);1335}13361337int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)1338{1339int total_cores, ret;13401341total_cores = cptpf->eng_grps.avail.max_se_cnt +1342cptpf->eng_grps.avail.max_ie_cnt +1343cptpf->eng_grps.avail.max_ae_cnt;13441345if (cptpf->has_cpt1) {1346ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);1347if (ret)1348return ret;1349}1350return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);1351}13521353void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,1354struct otx2_cpt_eng_grps *eng_grps)1355{1356struct otx2_cpt_eng_grp_info *grp;1357int i, j;13581359mutex_lock(&eng_grps->lock);1360delete_engine_grps(pdev, eng_grps);1361/* Release memory */1362for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {1363grp = &eng_grps->grp[i];1364for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {1365kfree(grp->engs[j].bmap);1366grp->engs[j].bmap = NULL;1367}1368}1369mutex_unlock(&eng_grps->lock);1370}13711372int otx2_cpt_init_eng_grps(struct pci_dev *pdev,1373struct otx2_cpt_eng_grps *eng_grps)1374{1375struct otx2_cpt_eng_grp_info *grp;1376int i, j, ret;13771378mutex_init(&eng_grps->lock);1379eng_grps->obj = pci_get_drvdata(pdev);1380eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;1381eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;1382eng_grps->avail.ae_cnt = 
eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > than max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		/* One bitmap, sized for all engines, per engine type slot */
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

/*
 * Create one two-engine group per engine type (AE, SE, IE).  These are
 * temporary groups used only to issue LOAD_FVC and discover the engines'
 * capabilities; the caller tears them down with delete_engine_grps().
 */
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	mutex_lock(&eng_grps->lock);
	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret) {
		mutex_unlock(&eng_grps->lock);
		return ret;
	}

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t result_baddr;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	int timeout = 10000;
	void *base, *rptr;
	int ret, etype;
	u32 len;

	/*
	 * We don't get capabilities if it was already done
	 * (when user enabled VFs for the first time)
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	/* Allocate extra memory for "rptr" and "result" pointer alignment */
	len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
	      sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;

	base = kzalloc(len, GFP_KERNEL);
	if (!base) {
		ret = -ENOMEM;
		goto
lf_cleanup;
	}

	rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
	rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_rptr;
	}

	/* "result" lives in the same mapping, past the LOAD_FVC data area */
	result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
						   OTX2_CPT_RES_ADDR_ALIGN);
	result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
			     OTX2_CPT_RES_ADDR_ALIGN);

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr;
	iq_cmd.cptr.u = 0;

	/* Issue one LOAD_FVC per engine type (etype 0 is skipped) */
	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
		timeout = 10000;

		/* Busy-poll the result completion code written by HW */
		while (lfs->ops->cpt_get_compcode(result) ==
		       OTX2_CPT_COMPLETION_CODE_INIT) {
			cpu_relax();
			udelay(1);
			timeout--;
			if (!timeout) {
				ret = -ENODEV;
				cptpf->is_eng_caps_discovered = false;
				dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
				goto error_no_response;
			}
		}

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	cptpf->is_eng_caps_discovered = true;

error_no_response:
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
free_rptr:
	kfree(base);
lf_cleanup:
	otx2_cptlf_shutdown(lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}

/*
 * devlink handler: create a custom engine group from a user supplied
 * ';'-separated string of engine-type tokens ("se:<count>", "ae:<count>",
 * "ie:<count>") and one or two microcode file names.
 */
int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
	struct device *dev = &cptpf->pdev->dev;
	char *start, *val, *err_msg, *tmp;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	struct fw_info_t fw_info;
	int ucode_idx = 0;

	if (!eng_grps->is_grps_created) {
		dev_err(dev, "Not allowed before creating the default groups\n");
		return -EINVAL;
	}
	err_msg = "Invalid engine group format";
	/* Parse a private copy; strsep()/strim() modify the buffer */
	strscpy(tmp_buf, ctx->val.vstr);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			/* Engine type tokens must precede ucode file names */
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
			has_ae = true;
		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
			if (has_ie || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto
err_print;
			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
			has_ie = true;
		} else {
			/* Anything else is a microcode file name (max two) */
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (!(grp_idx && ucode_idx))
		goto err_print;

	if (ucode_idx > 1 && grp_idx < 2)
		goto err_print;

	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
		err_msg = "Error max 2 engine types can be attached";
		goto err_print;
	}

	if (grp_idx > 1) {
		if ((engs[0].type + engs[1].type) !=
		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
			err_msg = "Only combination of SE+IE engines is allowed";
			goto err_print;
		}
		/* Keep SE engines at zero index */
		if (engs[1].type == OTX2_CPT_SE_TYPES)
			swap(engs[0], engs[1]);
	}
	mutex_lock(&eng_grps->lock);

	if (cptpf->enabled_vfs) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}
	INIT_LIST_HEAD(&fw_info.ucodes);

	ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
	if (ret) {
		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
		goto err_unlock;
	}
	if (ucode_idx > 1) {
		ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
		if (ret) {
			dev_err(dev, "Unable to load firmware %s\n",
				ucode_filename[1]);
			goto release_fw;
		}
	}
	uc_info[0] = get_ucode(&fw_info, engs[0].type);
	if (uc_info[0] == NULL) {
		dev_err(dev, "Unable to find firmware for %s\n",
			get_eng_type_str(engs[0].type));
		ret = -EINVAL;
		goto release_fw;
	}
	if (ucode_idx > 1) {
		uc_info[1] = get_ucode(&fw_info, engs[1].type);
		if (uc_info[1] == NULL) {
			dev_err(dev, "Unable to find firmware for %s\n",
				get_eng_type_str(engs[1].type));
			ret = -EINVAL;
			goto release_fw;
		}
	}
	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
				  (void **)uc_info, 1);

release_fw:
	cpt_ucode_release_fw(&fw_info);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
err_print:
	dev_err(dev, "%s\n", err_msg);
	return ret;
}

/*
 * devlink handler: delete the engine group named by a user string of the
 * form "egrp:<index>".  The group must exist and be currently enabled.
 */
int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct device *dev = &cptpf->pdev->dev;
	char *tmp, *err_msg;
	int egrp;
	int ret;

	err_msg = "Invalid input string format(ex: egrp:0)";
	if (strncasecmp(ctx->val.vstr, "egrp", 4))
		goto err_print;
	tmp = ctx->val.vstr;
	/* Skip the "egrp" prefix; tmp is left pointing at the index */
	strsep(&tmp, ":");
	if (!tmp)
		goto err_print;
	if (kstrtoint(tmp, 10, &egrp))
		goto err_print;

	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Invalid engine group %d", egrp);
		return -EINVAL;
	}
	if (!eng_grps->grp[egrp].is_enabled) {
		dev_err(dev, "Error engine_group%d is not configured", egrp);
		return -EINVAL;
	}
	mutex_lock(&eng_grps->lock);
	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
	mutex_unlock(&eng_grps->lock);

	return ret;

err_print:
	dev_err(dev, "%s\n", err_msg);
	return -EINVAL;
}