Path: drivers/crypto/cavium/cpt/cptvf_reqmanager.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include "cptvf.h"
#include "cptvf_algs.h"
#include "request_manager.h"

/**
 * get_free_pending_entry - get free entry from pending queue
 * @q: pending queue
 * @qlen: queue length
 */
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
						    int qlen)
{
	struct pending_entry *ent = NULL;

	ent = &q->head[q->rear];
	if (unlikely(ent->busy)) {
		ent = NULL;
		goto no_free_entry;
	}

	q->rear++;
	if (unlikely(q->rear == qlen))
		q->rear = 0;

no_free_entry:
	return ent;
}

static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
					   int qno)
{
	struct pending_queue *queue = &pqinfo->queue[qno];

	queue->front++;
	if (unlikely(queue->front == pqinfo->qlen))
		queue->front = 0;
}

static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
				 int buf_count, u8 *buffer)
{
	int ret = 0, i, j;
	int components;
	struct sglist_component *sg_ptr = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input List pointer is NULL\n");
		return -EFAULT;
	}

	for (i = 0; i < buf_count; i++) {
		if (likely(list[i].vptr)) {
			list[i].dma_addr = dma_map_single(&pdev->dev,
							  list[i].vptr,
							  list[i].size,
							  DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(&pdev->dev,
						       list[i].dma_addr))) {
				dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
					i);
				ret = -EIO;
				goto sg_cleanup;
			}
		}
	}

	components = buf_count / 4;
	sg_ptr = (struct sglist_component *)buffer;
	for (i = 0; i < components; i++) {
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
		sg_ptr++;
	}

	components = buf_count % 4;

	switch (components) {
	case 3:
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		fallthrough;
	case 2:
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		fallthrough;
	case 1:
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		break;
	default:
		break;
	}

	return ret;

sg_cleanup:
	for (j = 0; j < i; j++) {
		if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
		}

		list[j].dma_addr = 0;
	}

	return ret;
}
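
/*
 * Each sglist_component holds up to four (length, pointer) pairs in
 * big-endian format, which is why setup_sgio_components() above walks
 * the buffer list four entries at a time and mops up the one to three
 * leftovers in the switch. setup_sgio_list() below assembles the full
 * DPTR image the hardware reads: an 8-byte header (scatter count,
 * gather count, two reserved 16-bit words), then the gather
 * components, then the scatter components.
 */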

static inline int setup_sgio_list(struct cpt_vf *cptvf,
				  struct cpt_info_buffer *info,
				  struct cpt_request_info *req)
{
	u16 g_sz_bytes = 0, s_sz_bytes = 0;
	int ret = 0;
	struct pci_dev *pdev = cptvf->pdev;

	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
		dev_err(&pdev->dev, "Request SG components are higher than supported\n");
		ret = -EINVAL;
		goto scatter_gather_clean;
	}

	/* Setup gather (input) components */
	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
	info->gather_components = kzalloc(g_sz_bytes,
					  req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->gather_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->in,
				    req->incnt,
				    info->gather_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup gather list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Setup scatter (output) components */
	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
	info->scatter_components = kzalloc(s_sz_bytes,
					   req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->scatter_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->out,
				    req->outcnt,
				    info->scatter_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup scatter list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Create and initialize DPTR */
	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
	info->in_buffer = kzalloc(info->dlen,
				  req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->in_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
	((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
	((__be16 *)info->in_buffer)[2] = 0;
	((__be16 *)info->in_buffer)[3] = 0;

	memcpy(&info->in_buffer[8], info->gather_components,
	       g_sz_bytes);
	memcpy(&info->in_buffer[8 + g_sz_bytes],
	       info->scatter_components, s_sz_bytes);

	info->dptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->in_buffer,
					  info->dlen,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	/* Create and initialize RPTR */
	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE,
				   req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->out_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
	info->alternate_caddr = (u64 *)info->out_buffer;
	info->rptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->out_buffer,
					  COMPLETION_CODE_SIZE,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
			COMPLETION_CODE_SIZE);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	return 0;

scatter_gather_clean:
	return ret;
}
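
/*
 * The command queue is a chain of command chunks; each slot in a chunk
 * is one 64-byte CPT instruction. send_cpt_command() copies the
 * instruction into the current slot under the queue lock, hops to the
 * next chunk once the current one is full, and rings the VQ doorbell
 * so the hardware picks up the new instruction.
 */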

static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
			    u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct command_qinfo *qinfo = NULL;
	struct command_queue *queue;
	struct command_chunk *chunk;
	u8 *ent;
	int ret = 0;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
			qno, cptvf->nr_queues);
		return -EINVAL;
	}

	qinfo = &cptvf->cqinfo;
	queue = &qinfo->queue[qno];
	/* lock command queue */
	spin_lock(&queue->lock);
	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
	memcpy(ent, (void *)cmd, qinfo->cmd_size);

	if (++queue->idx >= queue->qhead->size / 64) {
		hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
			if (chunk == queue->qhead) {
				continue;
			} else {
				queue->qhead = chunk;
				break;
			}
		}
		queue->idx = 0;
	}
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();
	cptvf_write_vq_doorbell(cptvf, 1);
	/* unlock command queue */
	spin_unlock(&queue->lock);

	return ret;
}

static void do_request_cleanup(struct cpt_vf *cptvf,
			       struct cpt_info_buffer *info)
{
	int i;
	struct pci_dev *pdev = cptvf->pdev;
	struct cpt_request_info *req;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dlen, DMA_BIDIRECTIONAL);

	if (info->rptr_baddr)
		dma_unmap_single(&pdev->dev, info->rptr_baddr,
				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

	if (info->comp_baddr)
		dma_unmap_single(&pdev->dev, info->comp_baddr,
				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}

	kfree_sensitive(info->scatter_components);
	kfree_sensitive(info->gather_components);
	kfree_sensitive(info->out_buffer);
	kfree_sensitive(info->in_buffer);
	kfree_sensitive((void *)info->completion_addr);
	kfree_sensitive(info);
}

static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!info) {
		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
		return;
	}

	do_request_cleanup(cptvf, info);
}
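
/*
 * process_pending_queue() drains the pending queue in FIFO order. The
 * response buffer is seeded with COMPLETION_CODE_INIT, so a compcode
 * still equal to that marker means the hardware has not posted a
 * result: the entry either times out after CPT_COMMAND_TIMEOUT
 * seconds, or, while the alternate completion word is also untouched,
 * gets its deadline refreshed up to TIME_IN_RESET_COUNT times.
 * Anything else is treated as a serviced request and the completion
 * callback runs with the completion code.
 */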

static inline void process_pending_queue(struct cpt_vf *cptvf,
					 struct pending_qinfo *pqinfo,
					 int qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct pending_queue *pqueue = &pqinfo->queue[qno];
	struct pending_entry *pentry = NULL;
	struct cpt_info_buffer *info = NULL;
	union cpt_res_s *status = NULL;
	unsigned char ccode;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		info = (struct cpt_info_buffer *)pentry->post_arg;
		if (unlikely(!info)) {
			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
			pending_queue_inc_front(pqinfo, qno);
			spin_unlock_bh(&pqueue->lock);
			continue;
		}

		status = (union cpt_res_s *)pentry->completion_addr;
		ccode = status->s.compcode;
		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
		    (status->s.compcode == CPT_COMP_E_SWERR)) {
			dev_err(&pdev->dev, "Request failed with %s\n",
				(status->s.compcode == CPT_COMP_E_FAULT) ?
				"DMA Fault" : "Software error");
			pentry->completion_addr = NULL;
			pentry->busy = false;
			atomic64_dec((&pqueue->pending_count));
			pentry->post_arg = NULL;
			pending_queue_inc_front(pqinfo, qno);
			do_request_cleanup(cptvf, info);
			spin_unlock_bh(&pqueue->lock);
			break;
		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
			/* check for timeout */
			if (time_after_eq(jiffies,
					  (info->time_in +
					   (CPT_COMMAND_TIMEOUT * HZ)))) {
				dev_err(&pdev->dev, "Request timed out\n");
				pentry->completion_addr = NULL;
				pentry->busy = false;
				atomic64_dec((&pqueue->pending_count));
				pentry->post_arg = NULL;
				pending_queue_inc_front(pqinfo, qno);
				do_request_cleanup(cptvf, info);
				spin_unlock_bh(&pqueue->lock);
				break;
			} else if ((*info->alternate_caddr ==
				    (~COMPLETION_CODE_INIT)) &&
				   (info->extra_time < TIME_IN_RESET_COUNT)) {
				info->time_in = jiffies;
				info->extra_time++;
				spin_unlock_bh(&pqueue->lock);
				break;
			}
		}

		pentry->completion_addr = NULL;
		pentry->busy = false;
		pentry->post_arg = NULL;
		atomic64_dec((&pqueue->pending_count));
		pending_queue_inc_front(pqinfo, qno);
		spin_unlock_bh(&pqueue->lock);

		do_post_process(info->cptvf, info);
		/*
		 * Calling callback after we find
		 * that the request has been serviced
		 */
		pentry->callback(ccode, pentry->callback_arg);
	}
}
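
/*
 * process_request() is the submission path: allocate a tracking
 * cpt_info_buffer, build the DPTR/RPTR scatter-gather images, map the
 * response buffer for DMA, reserve a pending-queue entry, and hand a
 * CPT_INST_S instruction to send_cpt_command(). All error paths funnel
 * through do_request_cleanup(), which also unmaps the caller's in/out
 * buffers.
 */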

int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
	int ret = 0, clear = 0, queue = 0;
	struct cpt_info_buffer *info = NULL;
	struct cptvf_request *cpt_req = NULL;
	union ctrl_info *ctrl = NULL;
	union cpt_res_s *result = NULL;
	struct pending_entry *pentry = NULL;
	struct pending_queue *pqueue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	u8 group = 0;
	struct cpt_vq_command vq_cmd;
	union cpt_inst_s cptinst;

	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;
	}

	cpt_req = (struct cptvf_request *)&req->req;
	ctrl = (union ctrl_info *)&req->ctrl;

	info->cptvf = cptvf;
	group = ctrl->s.grp;
	ret = setup_sgio_list(cptvf, info, req);
	if (ret) {
		dev_err(&pdev->dev, "Setting up SG list failed\n");
		goto request_cleanup;
	}

	cpt_req->dlen = info->dlen;
	/*
	 * Get buffer for union cpt_res_s response
	 * structure and its physical address
	 */
	info->completion_addr = kzalloc(sizeof(union cpt_res_s),
					req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
		goto request_cleanup;
	}

	result = (union cpt_res_s *)info->completion_addr;
	result->s.compcode = COMPLETION_CODE_INIT;
	info->comp_baddr = dma_map_single(&pdev->dev,
					  (void *)info->completion_addr,
					  sizeof(union cpt_res_s),
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
			sizeof(union cpt_res_s));
		ret = -EFAULT;
		goto request_cleanup;
	}

	/* Fill the VQ command */
	vq_cmd.cmd.u64 = 0;
	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

	vq_cmd.dptr = info->dptr_baddr;
	vq_cmd.rptr = info->rptr_baddr;
	vq_cmd.cptr.u64 = 0;
	vq_cmd.cptr.s.grp = group;
	/* Get Pending Entry to submit command */
	/* Always queue 0, because 1 queue per VF */
	queue = 0;
	pqueue = &cptvf->pqinfo.queue[queue];

	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
		dev_err(&pdev->dev, "pending threshold reached\n");
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
	}

get_pending_entry:
	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
	if (unlikely(!pentry)) {
		spin_unlock_bh(&pqueue->lock);
		if (clear == 0) {
			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
			clear = 1;
			goto get_pending_entry;
		}
		dev_err(&pdev->dev, "Get free entry failed\n");
		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
			queue, pqueue->rear, pqueue->front);
		ret = -EFAULT;
		goto request_cleanup;
	}

	pentry->completion_addr = info->completion_addr;
	pentry->post_arg = (void *)info;
	pentry->callback = req->callback;
	pentry->callback_arg = req->callback_arg;
	info->pentry = pentry;
	pentry->busy = true;
	atomic64_inc(&pqueue->pending_count);

	/* Send CPT command */
	info->time_in = jiffies;
	info->req = req;

	/* Create the CPT_INST_S type command for HW interpretation */
	cptinst.s.doneint = true;
	cptinst.s.res_addr = (u64)info->comp_baddr;
	cptinst.s.tag = 0;
	cptinst.s.grp = 0;
	cptinst.s.wq_ptr = 0;
	cptinst.s.ei0 = vq_cmd.cmd.u64;
	cptinst.s.ei1 = vq_cmd.dptr;
	cptinst.s.ei2 = vq_cmd.rptr;
	cptinst.s.ei3 = vq_cmd.cptr.u64;

	ret = send_cpt_command(cptvf, &cptinst, queue);
	spin_unlock_bh(&pqueue->lock);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Send command failed for AE\n");
		ret = -EFAULT;
		goto request_cleanup;
	}

	return 0;

request_cleanup:
	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
	do_request_cleanup(cptvf, info);

	return ret;
}

void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
			qno);
		return;
	}

	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}
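
/*
 * cptvf_do_request() is the entry point used by the algorithm layer
 * (cptvf_algs.c). It only verifies that the device is ready and that
 * the request type matches the VF personality: SE-type VFs accept
 * symmetric (se_req) requests, AE-type VFs everything else.
 */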
request",565cptvf->vfid);566return -EINVAL;567} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {568dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",569cptvf->vfid);570return -EINVAL;571}572573return process_request(cptvf, req);574}575576577