Path: drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

/*
 * Interval to flush dirty data for next CTX entry. The interval is measured
 * in increments of 10ns(interval time = CTX_FLUSH_TIMER_COUNT * 10ns).
 */
#define CTX_FLUSH_TIMER_CNT 0x2FAF0

struct fw_info_t {
	struct list_head ucodes;
};

static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num < 0 ||
	    eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}

static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type, u16 rid)
{
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	int i, val = 0;
	u8 nn;

	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", rid);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr, blkaddr);
}

static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    rvu_make_pcifunc(cptpf->pdev,
						     cptpf->pf_id, 0),
				    blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not used,
		 * other cores should have already valid UCODE_BASE set
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
					     BLKADDR_CPT0);
}

static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
						   BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename, u16 rid)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
%p\n", curr->fw->data);453}454}455456static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,457u16 rid)458{459char filename[OTX2_CPT_NAME_LENGTH];460char eng_type[8] = {0};461int ret, e, i;462463INIT_LIST_HEAD(&fw_info->ucodes);464465for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {466strcpy(eng_type, get_eng_type_str(e));467for (i = 0; i < strlen(eng_type); i++)468eng_type[i] = tolower(eng_type[i]);469470snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",471rid, eng_type);472/* Request firmware for each engine type */473ret = load_fw(&pdev->dev, fw_info, filename, rid);474if (ret)475goto release_fw;476}477print_uc_info(fw_info);478return 0;479480release_fw:481cpt_ucode_release_fw(fw_info);482return ret;483}484485struct otx2_cpt_engs_rsvd *find_engines_by_type(486struct otx2_cpt_eng_grp_info *eng_grp,487int eng_type)488{489int i;490491for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {492if (!eng_grp->engs[i].type)493continue;494495if (eng_grp->engs[i].type == eng_type)496return &eng_grp->engs[i];497}498return NULL;499}500501static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,502int eng_type)503{504struct otx2_cpt_engs_rsvd *engs;505506engs = find_engines_by_type(eng_grp, eng_type);507508return (engs != NULL ? 1 : 0);509}510511static int update_engines_avail_count(struct device *dev,512struct otx2_cpt_engs_available *avail,513struct otx2_cpt_engs_rsvd *engs, int val)514{515switch (engs->type) {516case OTX2_CPT_SE_TYPES:517avail->se_cnt += val;518break;519520case OTX2_CPT_IE_TYPES:521avail->ie_cnt += val;522break;523524case OTX2_CPT_AE_TYPES:525avail->ae_cnt += val;526break;527528default:529dev_err(dev, "Invalid engine type %d\n", engs->type);530return -EINVAL;531}532return 0;533}534535static int update_engines_offset(struct device *dev,536struct otx2_cpt_engs_available *avail,537struct otx2_cpt_engs_rsvd *engs)538{539switch (engs->type) {540case OTX2_CPT_SE_TYPES:541engs->offset = 0;542break;543544case OTX2_CPT_IE_TYPES:545engs->offset = avail->max_se_cnt;546break;547548case OTX2_CPT_AE_TYPES:549engs->offset = avail->max_se_cnt + avail->max_ie_cnt;550break;551552default:553dev_err(dev, "Invalid engine type %d\n", engs->type);554return -EINVAL;555}556return 0;557}558559static int release_engines(struct device *dev,560struct otx2_cpt_eng_grp_info *grp)561{562int i, ret = 0;563564for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {565if (!grp->engs[i].type)566continue;567568if (grp->engs[i].count > 0) {569ret = update_engines_avail_count(dev, &grp->g->avail,570&grp->engs[i],571grp->engs[i].count);572if (ret)573return ret;574}575576grp->engs[i].type = 0;577grp->engs[i].count = 0;578grp->engs[i].offset = 0;579grp->engs[i].ucode = NULL;580bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);581}582return 0;583}584585static int do_reserve_engines(struct device *dev,586struct otx2_cpt_eng_grp_info *grp,587struct otx2_cpt_engines *req_engs)588{589struct otx2_cpt_engs_rsvd *engs = NULL;590int i, ret;591592for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {593if (!grp->engs[i].type) {594engs = &grp->engs[i];595break;596}597}598599if (!engs)600return -ENOMEM;601602engs->type = req_engs->type;603engs->count = req_engs->count;604605ret = update_engines_offset(dev, &grp->g->avail, engs);606if (ret)607return ret;608609if (engs->count > 0) {610ret = update_engines_avail_count(dev, &grp->g->avail, engs,611-engs->count);612if (ret)613return ret;614}615616return 0;617}618619static int check_engines_availability(struct device *dev,620struct otx2_cpt_eng_grp_info 
static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error available %s engines %d < than requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate if a number of requested engines are available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
				  ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If mirrored group has this type of engines attached then
		 * there are 3 scenarios possible:
		 * 1) mirrored_engs.count == engs[i].count then all engines
		 * from mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count then only a subset of
		 * engines from mirrored engine group will be shared with this
		 * engine group
		 * 3) mirrored_engs.count < engs[i].count then all engines
		 * from mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusively use
		 * by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Removing engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}

static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

#define PCI_DEVID_CN10K_RNM 0xA098
#define RNM_ENTROPY_STATUS 0x8

static void rnm_to_cpt_errata_fixup(struct device *dev)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int timeout = 5000;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto put_pdev;

	while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout) {
			dev_warn(dev, "RNM is not producing entropy\n");
			break;
		}
	}

	iounmap(base);

put_pdev:
	pci_dev_put(pdev);
}

int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{

	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}

int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct pci_dev *pdev = cptpf->pdev;
	struct fw_info_t fw_info;
	u64 reg_val;
	int ret = 0;

	mutex_lock(&eng_grps->lock);
	/*
	 * We don't create engine groups if it was already
	 * made (when user enabled VFs for the first time)
	 */
	if (eng_grps->is_grps_created)
		goto unlock;

	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret)
		goto unlock;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);

	if (is_dev_otx2(pdev))
		goto unlock;

	/*
	 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
	 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
	 */
	rnm_to_cpt_errata_fixup(&pdev->dev);

	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
			     BLKADDR_CPT0);
	/*
	 * Configure engine group mask to allow context prefetching
	 * for the groups and enable random number request, to enable
	 * CPT to request random numbers from RNM.
	 */
	reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      reg_val, BLKADDR_CPT0);
	/*
	 * Set interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to maximum supported
	 * value.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);

	/*
	 * Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when
	 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
	 * encounters a fault/poison, a rare case may result in
	 * unpredictable data being delivered to a CPT engine.
	 */
	if (cpt_is_errata_38550_exists(pdev)) {
		otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				     &reg_val, BLKADDR_CPT0);
		otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				      reg_val | BIT_ULL(24), BLKADDR_CPT0);
	}

	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
				  int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
	u64 reg;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	if (cptpf->has_cpt1) {
		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
	mutex_unlock(&eng_grps->lock);
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > than max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	mutex_lock(&eng_grps->lock);
	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret) {
		mutex_unlock(&eng_grps->lock);
		return ret;
	}

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t result_baddr;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	int timeout = 10000;
	void *base, *rptr;
	int ret, etype;
	u32 len;

	/*
	 * We don't get capabilities if it was already done
	 * (when user enabled VFs for the first time)
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	/* Allocate extra memory for "rptr" and "result" pointer alignment */
	len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
	      sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;

	base = kzalloc(len, GFP_KERNEL);
	if (!base) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}

	rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
	rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_rptr;
	}

	result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
						   OTX2_CPT_RES_ADDR_ALIGN);
	result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
			     OTX2_CPT_RES_ADDR_ALIGN);

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
		timeout = 10000;

		while (lfs->ops->cpt_get_compcode(result) ==
		       OTX2_CPT_COMPLETION_CODE_INIT) {
			cpu_relax();
			udelay(1);
			timeout--;
			if (!timeout) {
				ret = -ENODEV;
				cptpf->is_eng_caps_discovered = false;
				dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
				goto error_no_response;
			}
		}

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	cptpf->is_eng_caps_discovered = true;

error_no_response:
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
free_rptr:
	kfree(base);
lf_cleanup:
	otx2_cptlf_shutdown(lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}

int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
	struct device *dev = &cptpf->pdev->dev;
	char *start, *val, *err_msg, *tmp;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	struct fw_info_t fw_info;
	int ucode_idx = 0;

	if (!eng_grps->is_grps_created) {
		dev_err(dev, "Not allowed before creating the default groups\n");
		return -EINVAL;
	}
	err_msg = "Invalid engine group format";
	strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
			has_ae = true;
		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
			if (has_ie || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
			has_ie = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (!(grp_idx && ucode_idx))
		goto err_print;

	if (ucode_idx > 1 && grp_idx < 2)
		goto err_print;

	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
		err_msg = "Error max 2 engine types can be attached";
		goto err_print;
	}

	if (grp_idx > 1) {
		if ((engs[0].type + engs[1].type) !=
		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
			err_msg = "Only combination of SE+IE engines is allowed";
			goto err_print;
		}
		/* Keep SE engines at zero index */
		if (engs[1].type == OTX2_CPT_SE_TYPES)
			swap(engs[0], engs[1]);
	}
	mutex_lock(&eng_grps->lock);

	if (cptpf->enabled_vfs) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}
	INIT_LIST_HEAD(&fw_info.ucodes);

	ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
	if (ret) {
		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
		goto err_unlock;
	}
	if (ucode_idx > 1) {
		ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
		if (ret) {
			dev_err(dev, "Unable to load firmware %s\n",
				ucode_filename[1]);
			goto release_fw;
		}
	}
	uc_info[0] = get_ucode(&fw_info, engs[0].type);
	if (uc_info[0] == NULL) {
		dev_err(dev, "Unable to find firmware for %s\n",
			get_eng_type_str(engs[0].type));
		ret = -EINVAL;
		goto release_fw;
	}
	if (ucode_idx > 1) {
		uc_info[1] = get_ucode(&fw_info, engs[1].type);
		if (uc_info[1] == NULL) {
			dev_err(dev, "Unable to find firmware for %s\n",
				get_eng_type_str(engs[1].type));
			ret = -EINVAL;
			goto release_fw;
		}
	}
	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
				  (void **)uc_info, 1);

release_fw:
	cpt_ucode_release_fw(&fw_info);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
err_print:
	dev_err(dev, "%s\n", err_msg);
	return ret;
}

int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct device *dev = &cptpf->pdev->dev;
	char *tmp, *err_msg;
	int egrp;
	int ret;

	err_msg = "Invalid input string format(ex: egrp:0)";
	if (strncasecmp(ctx->val.vstr, "egrp", 4))
		goto err_print;
	tmp = ctx->val.vstr;
	strsep(&tmp, ":");
	if (!tmp)
		goto err_print;
	if (kstrtoint(tmp, 10, &egrp))
		goto err_print;

	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Invalid engine group %d", egrp);
		return -EINVAL;
	}
	if (!eng_grps->grp[egrp].is_enabled) {
		dev_err(dev, "Error engine_group%d is not configured", egrp);
		return -EINVAL;
	}
	mutex_lock(&eng_grps->lock);
	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
	mutex_unlock(&eng_grps->lock);

	return ret;

err_print:
	dev_err(dev, "%s\n", err_msg);
	return -EINVAL;
}