Path: blob/main/sys/dev/bnxt/bnxt_re/qplib_rcfw.c
107436 views
/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/device.h>

#include "hsi_struct_def.h"
#include "qplib_tlv.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"

/* Forward declaration: the CREQ service routine is invoked directly from
 * the wait/poll helpers below to compensate for missed interrupts.
 */
static void bnxt_qplib_service_creq(unsigned long data);

/* __check_cmdq_stall - detect a dead command channel.
 * Compares the caller's previously sampled CMDQ producer/consumer indices
 * against the current ones; if neither moved, no command was posted or
 * completed since the last sample and the FW is considered down.
 * On progress, the caller's snapshot is refreshed for the next check.
 */
int __check_cmdq_stall(struct bnxt_qplib_rcfw *rcfw,
		       u32 *cur_prod, u32 *cur_cons)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;

	cmdq = &rcfw->cmdq;

	if (*cur_prod == cmdq->hwq.prod &&
	    *cur_cons == cmdq->hwq.cons)
		/* No activity on CMDQ or CREQ. FW down */
		return -ETIMEDOUT;

	*cur_prod = cmdq->hwq.prod;
	*cur_cons = cmdq->hwq.cons;
	return 0;
}

/* bnxt_qplib_map_rc - map a timed-out command to a benign return code.
 * Destroy/teardown style opcodes can be treated as successful when the FW
 * is unreachable (the resources die with the FW anyway); everything else
 * keeps reporting -ETIMEDOUT to the caller.
 */
static int bnxt_qplib_map_rc(u8 opcode)
{
	switch (opcode) {
	case CMDQ_BASE_OPCODE_DESTROY_QP:
	case CMDQ_BASE_OPCODE_DESTROY_SRQ:
	case CMDQ_BASE_OPCODE_DESTROY_CQ:
	case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
	case CMDQ_BASE_OPCODE_DEREGISTER_MR:
	case CMDQ_BASE_OPCODE_DELETE_GID:
	case CMDQ_BASE_OPCODE_DESTROY_QP1:
	case CMDQ_BASE_OPCODE_DESTROY_AH:
	case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
	case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
	case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
		return 0;
	default:
		return -ETIMEDOUT;
	}
}

/**
 * bnxt_re_is_fw_stalled - Check firmware health
 * @rcfw - rcfw channel instance of rdev
 * @cookie - cookie to track the command
 *
 * If firmware has not responded any rcfw command within
 * rcfw->max_timeout, consider firmware as stalled.
 *
 * Returns:
 * 0 if firmware is responding
 * -ENODEV if firmware is not responding
 */
static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_crsqe *crsqe;

	crsqe = &rcfw->crsqe_tbl[cookie];
	cmdq = &rcfw->cmdq;

	/* cmdq->last_seen is refreshed on every CREQ entry; a stall means
	 * no completion of any kind for max_timeout seconds.
	 */
	if (time_after(jiffies, cmdq->last_seen +
		       (rcfw->max_timeout * HZ))) {
		dev_warn_ratelimited(&rcfw->pdev->dev,
			"%s: FW STALL Detected. cmdq[%#x]=%#x waited (%ld > %d) msec active %d\n",
			__func__, cookie, crsqe->opcode,
			(long)jiffies_to_msecs(jiffies - cmdq->last_seen),
			rcfw->max_timeout * 1000,
			crsqe->is_in_used);
		return -ENODEV;
	}

	return 0;
}
/**
 * __wait_for_resp - Don't hold the cpu context and wait for response
 * @rcfw - rcfw channel instance of rdev
 * @cookie - cookie to track the command
 *
 * Wait for command completion in sleepable context.
 *
 * Returns:
 * 0 if command is completed by firmware.
 * Non zero error code for rest of the case.
 */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	unsigned long issue_time;
	int ret;

	cmdq = &rcfw->cmdq;
	issue_time = jiffies;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		if (RCFW_NO_FW_ACCESS(rcfw))
			return bnxt_qplib_map_rc(crsqe->opcode);
		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
			return -ETIMEDOUT;

		/* Non zero means command completed */
		ret = wait_event_timeout(cmdq->waitq,
					 !crsqe->is_in_used ||
					 RCFW_NO_FW_ACCESS(rcfw),
					 msecs_to_jiffies(rcfw->max_timeout * 1000));

		if (!crsqe->is_in_used)
			return 0;
		/*
		 * Take care if interrupt miss or other cases like DBR drop
		 */
		bnxt_qplib_service_creq((unsigned long)rcfw);
		dev_warn_ratelimited(&rcfw->pdev->dev,
			"Non-Blocking QPLIB: cmdq[%#x]=%#x waited (%lu) msec bit %d\n",
			cookie, crsqe->opcode,
			(long)jiffies_to_msecs(jiffies - issue_time),
			crsqe->is_in_used);

		/* Manual servicing above may have reaped the completion */
		if (!crsqe->is_in_used)
			return 0;

		ret = bnxt_re_is_fw_stalled(rcfw, cookie);
		if (ret)
			return ret;

	} while (true);
};

/**
 * __block_for_resp - hold the cpu context and wait for response
 * @rcfw - rcfw channel instance of rdev
 * @cookie - cookie to track the command
 *
 * This function will hold the cpu (non-sleepable context) and
 * wait for command completion. Maximum holding interval is 8 second.
 *
 * Returns:
 * -ETIMEOUT if command is not completed in specific time interval.
 * 0 if command is completed by firmware.
 */
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	unsigned long issue_time = 0;

	issue_time = jiffies;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		if (RCFW_NO_FW_ACCESS(rcfw))
			return bnxt_qplib_map_rc(crsqe->opcode);
		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
			return -ETIMEDOUT;

		udelay(1);

		/* Below call is must since there can be a deadlock
		 * if interrupt is mapped to the same cpu
		 */
		bnxt_qplib_service_creq((unsigned long)rcfw);
		if (!crsqe->is_in_used)
			return 0;

	} while (time_before(jiffies, issue_time + (8 * HZ)));

	dev_warn_ratelimited(&rcfw->pdev->dev,
			     "Blocking QPLIB: cmdq[%#x]=%#x taken (%lu) msec",
			     cookie, crsqe->opcode,
			     (long)jiffies_to_msecs(jiffies - issue_time));

	return -ETIMEDOUT;
};

/* __send_message_no_waiter - get cookie and post the message.
 * @rcfw - rcfw channel instance of rdev
 * @msg - qplib message internal
 *
 * This function will just post and don't bother about completion.
 * Current design of this function is -
 * user must hold the completion queue hwq->lock.
 * user must have used existing completion and free the resources.
 * this function will not check queue full condition.
 * this function will explicitly set is_waiter_alive=false.
 * current use case is - send destroy_ah if create_ah is return
 * after waiter of create_ah is lost. It can be extended for other
 * use case as well.
 *
 * Returns: Nothing
 *
 */
static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw,
				     struct bnxt_qplib_cmdqmsg *msg)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *cmdq_hwq = &cmdq->hwq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_cmdqe *cmdqe;
	u32 sw_prod, cmdq_prod, bsize;
	u16 cookie;
	u8 *preq;

	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
	crsqe = &rcfw->crsqe_tbl[cookie];

	/* Set cmd_size in terms of 16B slots in req. */
	bsize = bnxt_qplib_set_cmd_slots(msg->req);
	/* GET_CMD_SIZE would return number of slots in either case of tlv
	 * and non-tlv commands after call to bnxt_qplib_set_cmd_slots()
	 */
	crsqe->send_timestamp = jiffies;
	crsqe->is_internal_cmd = true;
	crsqe->is_waiter_alive = false;
	crsqe->is_in_used = true;
	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);

	/* Copy the request into the ring one 16B cmdqe segment at a time */
	preq = (u8 *)msg->req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq_hwq->prod, cmdq_hwq);
		cmdqe = bnxt_qplib_get_qe(cmdq_hwq, sw_prod, NULL);
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
		preq += min_t(u32, bsize, sizeof(*cmdqe));
		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
		cmdq_hwq->prod++;
	} while (bsize > 0);
	cmdq->seq_num++;

	cmdq_prod = cmdq_hwq->prod & 0xFFFF;
	/* Balanced by atomic_dec in the completion path when the internal
	 * (no-waiter) command completes successfully.
	 */
	atomic_inc(&rcfw->timeout_send);
	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
}

/* __send_message - post a tracked rcfw command to the CMDQ.
 * Reserves a cookie/crsqe slot under cmdq_hwq->lock, copies the request
 * into the ring in 16B units and rings the CMDQ doorbell. Returns -EAGAIN
 * when the ring has insufficient free slots.
 */
static int __send_message(struct bnxt_qplib_rcfw *rcfw,
			  struct bnxt_qplib_cmdqmsg *msg)
{
	u32 bsize, free_slots, required_slots;
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_cmdqe *cmdqe;
	struct bnxt_qplib_hwq *cmdq_hwq;
	u32 sw_prod, cmdq_prod;
	struct pci_dev *pdev;
	unsigned long flags;
	u16 cookie;
	u8 opcode;
	u8 *preq;

	cmdq = &rcfw->cmdq;
	cmdq_hwq = &cmdq->hwq;
	pdev = rcfw->pdev;
	opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe */
	spin_lock_irqsave(&cmdq_hwq->lock, flags);
	required_slots = bnxt_qplib_get_cmd_slots(msg->req);
	free_slots = HWQ_FREE_SLOTS(cmdq_hwq);
	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
	crsqe = &rcfw->crsqe_tbl[cookie];

	if (required_slots >= free_slots) {
		dev_warn_ratelimited(&pdev->dev,
				     "QPLIB: RCFW: CMDQ is full req/free %d/%d!\n",
				     required_slots, free_slots);
		rcfw->cmdq_full_dbg++;
		spin_unlock_irqrestore(&cmdq_hwq->lock, flags);
		return -EAGAIN;
	}

	/* Cookie collision means a previous command with this cookie never
	 * completed/was reaped - fatal bookkeeping error.
	 */
	if (crsqe->is_in_used)
		panic("QPLIB: Cookie was not requested %d\n",
		      cookie);

	if (msg->block)
		cookie |= RCFW_CMD_IS_BLOCKING;
	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));

	/* Set cmd_size in terms of 16B slots in req. */
	bsize = bnxt_qplib_set_cmd_slots(msg->req);
	/* GET_CMD_SIZE would return number of slots in either case of tlv
	 * and non-tlv commands after call to bnxt_qplib_set_cmd_slots()
	 */
	crsqe->send_timestamp = jiffies;
	crsqe->free_slots = free_slots;
	crsqe->resp = (struct creq_qp_event *)msg->resp;
	crsqe->resp->cookie = cpu_to_le16(cookie);
	crsqe->is_internal_cmd = false;
	crsqe->is_waiter_alive = true;
	crsqe->is_in_used = true;
	crsqe->opcode = opcode;
	crsqe->requested_qp_state = msg->qp_state;

	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
	if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;

		/* Side buffer for commands that return extra response data;
		 * FW DMAs into sbuf->dma_addr.
		 */
		__set_cmdq_base_resp_addr(msg->req, msg->req_sz,
					  cpu_to_le64(sbuf->dma_addr));
		__set_cmdq_base_resp_size(msg->req, msg->req_sz,
					  ALIGN(sbuf->size, BNXT_QPLIB_CMDQE_UNITS) /
					  BNXT_QPLIB_CMDQE_UNITS);
	}

	preq = (u8 *)msg->req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq_hwq->prod, cmdq_hwq);
		cmdqe = bnxt_qplib_get_qe(cmdq_hwq, sw_prod, NULL);
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
		preq += min_t(u32, bsize, sizeof(*cmdqe));
		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
		cmdq_hwq->prod++;
	} while (bsize > 0);
	cmdq->seq_num++;

	cmdq_prod = cmdq_hwq->prod & 0xFFFF;
	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	}
	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);

	dev_dbg(&pdev->dev, "QPLIB: RCFW sent request with 0x%x 0x%x 0x%x\n",
		cmdq_prod, cmdq_hwq->prod, crsqe->req_size);
	dev_dbg(&pdev->dev,
		"QPLIB: opcode 0x%x with cookie 0x%x at cmdq/crsq 0x%p/0x%p\n",
		opcode,
		__get_cmdq_base_cookie(msg->req, msg->req_sz),
		cmdqe, crsqe);
	spin_unlock_irqrestore(&cmdq_hwq->lock, flags);
	/* Return the CREQ response pointer */
	return 0;
}

/**
 * __poll_for_resp - self poll completion for rcfw command
 * @rcfw - rcfw channel instance of rdev
 * @cookie - cookie to track the command
 *
 * It works same as __wait_for_resp except this function will
 * do self polling in sort interval since interrupt is disabled.
 * This function can not be called from non-sleepable context.
 *
 * Returns:
 * -ETIMEOUT if command is not completed in specific time interval.
 * 0 if command is completed by firmware.
 */
static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	unsigned long issue_time;
	int ret;

	issue_time = jiffies;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		if (RCFW_NO_FW_ACCESS(rcfw))
			return bnxt_qplib_map_rc(crsqe->opcode);
		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
			return -ETIMEDOUT;

		usleep_range(1000, 1001);

		/* No interrupt available - reap CREQ entries ourselves */
		bnxt_qplib_service_creq((unsigned long)rcfw);
		if (!crsqe->is_in_used)
			return 0;

		if (jiffies_to_msecs(jiffies - issue_time) >
		    (rcfw->max_timeout * 1000)) {
			dev_warn_ratelimited(&rcfw->pdev->dev,
				"Self Polling QPLIB: cmdq[%#x]=%#x taken (%lu) msec",
				cookie, crsqe->opcode,
				(long)jiffies_to_msecs(jiffies - issue_time));
			ret = bnxt_re_is_fw_stalled(rcfw, cookie);
			if (ret)
				return ret;
		}
	} while (true);

};

/* __send_message_basic_sanity - pre-flight checks before posting a command:
 * rejects posting when FW is unreachable or stalled, double INITIALIZE_FW,
 * and any non-bootstrap opcode before the FW has been initialized.
 */
static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
				       struct bnxt_qplib_cmdqmsg *msg, u8 opcode)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;

	cmdq = &rcfw->cmdq;

	/* Prevent posting if f/w is not in a state to process */
	if (RCFW_NO_FW_ACCESS(rcfw))
		return -ENXIO;

	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
		return -ETIMEDOUT;

	if 
(test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&461opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {462dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!\n");463return -EINVAL;464}465466if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&467(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&468opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&469opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {470dev_err(&rcfw->pdev->dev,471"QPLIB: RCFW not initialized, reject opcode 0x%x\n",472opcode);473return -ENOTSUPP;474}475476return 0;477}478479/* This function will just post and do not bother about completion */480static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,481struct creq_create_ah_resp *create_ah_resp)482{483struct bnxt_qplib_cmdqmsg msg = {};484struct cmdq_destroy_ah req = {};485486bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_AH,487sizeof(req));488req.ah_cid = create_ah_resp->xid;489msg.req = (struct cmdq_base *)&req;490msg.req_sz = sizeof(req);491__send_message_no_waiter(rcfw, &msg);492dev_warn_ratelimited(&rcfw->pdev->dev,493"From %s: ah_cid = %d timeout_send %d\n", __func__,494req.ah_cid,495atomic_read(&rcfw->timeout_send));496}497498/**499* __bnxt_qplib_rcfw_send_message - qplib interface to send500* and complete rcfw command.501* @rcfw - rcfw channel instance of rdev502* @msg - qplib message internal503*504* This function does not account shadow queue depth. It will send505* all the command unconditionally as long as send queue is not full.506*507* Returns:508* 0 if command completed by firmware.509* Non zero if the command is not completed by firmware.510*/511static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,512struct bnxt_qplib_cmdqmsg *msg)513{514struct bnxt_qplib_crsqe *crsqe;515struct creq_qp_event *event;516unsigned long flags;517u16 cookie;518int rc = 0;519u8 opcode;520521opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);522523rc = __send_message_basic_sanity(rcfw, msg, opcode);524if (rc)525return rc == -ENXIO ? 
bnxt_qplib_map_rc(opcode) : rc;526527rc = __send_message(rcfw, msg);528if (rc)529return rc;530531cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req,532msg->req_sz)) & RCFW_MAX_COOKIE_VALUE;533534535if (msg->block)536rc = __block_for_resp(rcfw, cookie);537else if (atomic_read(&rcfw->rcfw_intr_enabled))538rc = __wait_for_resp(rcfw, cookie);539else540rc = __poll_for_resp(rcfw, cookie);541542if (rc) {543/* First check if it is FW stall.544* Use hwq.lock to avoid race with actual completion.545*/546spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);547crsqe = &rcfw->crsqe_tbl[cookie];548crsqe->is_waiter_alive = false;549if (rc == -ENODEV)550set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);551spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);552553return -ETIMEDOUT;554}555556event = (struct creq_qp_event *)msg->resp;557if (event->status) {558/* failed with status */559dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x (%s) status %d\n",560cookie, opcode, GET_OPCODE_TYPE(opcode), event->status);561rc = -EFAULT;562/*563* Workaround to avoid errors in the stack during bond564* creation and deletion.565* Disable error returned for ADD_GID/DEL_GID566*/567if (opcode == CMDQ_BASE_OPCODE_ADD_GID ||568opcode == CMDQ_BASE_OPCODE_DELETE_GID)569rc = 0;570}571572dev_dbg(&pdev->dev, "QPLIB: %s:%d - op 0x%x (%s), cookie 0x%x -- Return: e->status 0x%x, rc = 0x%x\n",573__func__, __LINE__, opcode, GET_OPCODE_TYPE(opcode), cookie, event->status, rc);574return rc;575}576577/**578* bnxt_qplib_rcfw_send_message - qplib interface to send579* and complete rcfw command.580* @rcfw - rcfw channel instance of rdev581* @msg - qplib message internal582*583* Driver interact with Firmware through rcfw channel/slow path in two ways.584* a. Blocking rcfw command send. In this path, driver cannot hold585* the context for longer period since it is holding cpu until586* command is not completed.587* b. Non-blocking rcfw command send. In this path, driver can hold the588* context for longer period. 
There may be many pending command waiting589* for completion because of non-blocking nature.590*591* Driver will use shadow queue depth. Current queue depth of 8K592* (due to size of rcfw message it can be actual ~4K rcfw outstanding)593* is not optimal for rcfw command processing in firmware.594* RCFW_CMD_NON_BLOCKING_SHADOW_QD is defined as 64.595* Restrict at max 64 Non-Blocking rcfw commands.596* Do not allow more than 64 non-blocking command to the Firmware.597* Allow all blocking commands until there is no queue full.598*599* Returns:600* 0 if command completed by firmware.601* Non zero if the command is not completed by firmware.602*/603int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,604struct bnxt_qplib_cmdqmsg *msg)605{606int ret;607608if (!msg->block) {609down(&rcfw->rcfw_inflight);610ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);611up(&rcfw->rcfw_inflight);612} else {613ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);614}615616return ret;617}618619static void bnxt_re_add_perf_stats(struct bnxt_qplib_rcfw *rcfw,620struct bnxt_qplib_crsqe *crsqe)621{622u32 latency_msec, dest_stats_id;623u64 *dest_stats_ptr = NULL;624625latency_msec = jiffies_to_msecs(rcfw->cmdq.last_seen -626crsqe->send_timestamp);627if (latency_msec/1000 < RCFW_MAX_LATENCY_SEC_SLAB_INDEX)628rcfw->rcfw_lat_slab_sec[latency_msec/1000]++;629630if (!rcfw->sp_perf_stats_enabled)631return;632633if (latency_msec < RCFW_MAX_LATENCY_MSEC_SLAB_INDEX)634rcfw->rcfw_lat_slab_msec[latency_msec]++;635636switch (crsqe->opcode) {637case CMDQ_BASE_OPCODE_CREATE_QP:638dest_stats_id = rcfw->qp_create_stats_id++;639dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;640dest_stats_ptr = &rcfw->qp_create_stats[dest_stats_id];641break;642case CMDQ_BASE_OPCODE_DESTROY_QP:643dest_stats_id = rcfw->qp_destroy_stats_id++;644dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;645dest_stats_ptr = &rcfw->qp_destroy_stats[dest_stats_id];646break;647case CMDQ_BASE_OPCODE_REGISTER_MR:648dest_stats_id = 
rcfw->mr_create_stats_id++;649dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;650dest_stats_ptr = &rcfw->mr_create_stats[dest_stats_id];651break;652case CMDQ_BASE_OPCODE_DEREGISTER_MR:653case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:654dest_stats_id = rcfw->mr_destroy_stats_id++;655dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;656dest_stats_ptr = &rcfw->mr_destroy_stats[dest_stats_id];657break;658case CMDQ_BASE_OPCODE_MODIFY_QP:659if (crsqe->requested_qp_state != IB_QPS_ERR)660break;661dest_stats_id = rcfw->qp_modify_stats_id++;662dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;663dest_stats_ptr = &rcfw->qp_modify_stats[dest_stats_id];664break;665default:666break;667}668if (dest_stats_ptr)669*dest_stats_ptr = max_t(unsigned long,670(rcfw->cmdq.last_seen - crsqe->send_timestamp), 1);671672}673674/* Completions */675static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,676struct creq_qp_event *event,677u32 *num_wait)678{679struct bnxt_qplib_hwq *cmdq_hwq = &rcfw->cmdq.hwq;680struct creq_cq_error_notification *cqerr;681struct creq_qp_error_notification *qperr;682struct bnxt_qplib_crsqe *crsqe;683struct bnxt_qplib_reftbl *tbl;684struct bnxt_qplib_qp *qp;685struct bnxt_qplib_cq *cq;686u16 cookie, blocked = 0;687struct pci_dev *pdev;688bool is_waiter_alive;689unsigned long flags;690u32 wait_cmds = 0;691u32 xid, qp_idx;692u32 req_size;693int rc = 0;694695pdev = rcfw->pdev;696switch (event->event) {697case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:698tbl = &rcfw->res->reftbl.qpref;699qperr = (struct creq_qp_error_notification *)event;700xid = le32_to_cpu(qperr->xid);701qp_idx = map_qp_id_to_tbl_indx(xid, tbl);702spin_lock(&tbl->lock);703qp = tbl->rec[qp_idx].handle;704if (!qp) {705spin_unlock(&tbl->lock);706break;707}708bnxt_qplib_mark_qp_error(qp);709rc = rcfw->creq.aeq_handler(rcfw, event, qp);710spin_unlock(&tbl->lock);711/*712* Keeping these prints as debug to avoid flooding of log713* messages during modify QP to error state by 
applications714*/715dev_dbg(&pdev->dev, "QPLIB: QP Error encountered!\n");716dev_dbg(&pdev->dev,717"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",718xid, qperr->req_err_state_reason,719qperr->res_err_state_reason);720break;721case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:722tbl = &rcfw->res->reftbl.cqref;723cqerr = (struct creq_cq_error_notification *)event;724xid = le32_to_cpu(cqerr->xid);725spin_lock(&tbl->lock);726cq = tbl->rec[GET_TBL_INDEX(xid, tbl)].handle;727if (!cq) {728spin_unlock(&tbl->lock);729break;730}731rc = rcfw->creq.aeq_handler(rcfw, event, cq);732spin_unlock(&tbl->lock);733dev_dbg(&pdev->dev, "QPLIB: CQ error encountered!\n");734break;735default:736/*737* Command Response738* cmdq hwq lock needs to be acquired to synchronize739* the command send and completion reaping. This function740* is always called with creq hwq lock held. So there is no741* chance of deadlock here as the locking is in correct sequence.742* Using the nested variant of spin_lock to annotate743*/744spin_lock_irqsave_nested(&cmdq_hwq->lock, flags,745SINGLE_DEPTH_NESTING);746cookie = le16_to_cpu(event->cookie);747blocked = cookie & RCFW_CMD_IS_BLOCKING;748cookie &= RCFW_MAX_COOKIE_VALUE;749750crsqe = &rcfw->crsqe_tbl[cookie];751752bnxt_re_add_perf_stats(rcfw, crsqe);753754if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,755&rcfw->cmdq.flags),756"QPLIB: Unreponsive rcfw channel detected.!!")) {757dev_info(&pdev->dev, "rcfw timedout: cookie = %#x,"758" latency_msec = %ld free_slots = %d\n", cookie,759(long)jiffies_to_msecs(rcfw->cmdq.last_seen -760crsqe->send_timestamp),761crsqe->free_slots);762spin_unlock_irqrestore(&cmdq_hwq->lock, flags);763return rc;764}765766if (crsqe->is_internal_cmd && !event->status)767atomic_dec(&rcfw->timeout_send);768769if (crsqe->is_waiter_alive) {770if (crsqe->resp)771memcpy(crsqe->resp, event, sizeof(*event));772if (!blocked)773wait_cmds++;774}775776req_size = crsqe->req_size;777is_waiter_alive = crsqe->is_waiter_alive;778779crsqe->req_size = 
0;780if (!crsqe->is_waiter_alive)781crsqe->resp = NULL;782crsqe->is_in_used = false;783/* Consumer is updated so that __send_message_no_waiter784* can never see queue full.785* It is safe since we are still holding cmdq_hwq->lock.786*/787cmdq_hwq->cons += req_size;788789/* This is a case to handle below scenario -790* Create AH is completed successfully by firmware,791* but completion took more time and driver already lost792* the context of create_ah from caller.793* We have already return failure for create_ah verbs,794* so let's destroy the same address vector since it is795* no more used in stack. We don't care about completion796* in __send_message_no_waiter.797* If destroy_ah is failued by firmware, there will be AH798* resource leak and relatively not critical + unlikely799* scenario. Current design is not to handle such case.800*/801if (!is_waiter_alive && !event->status &&802event->event == CREQ_QP_EVENT_EVENT_CREATE_AH)803__destroy_timedout_ah(rcfw,804(struct creq_create_ah_resp *)805event);806807spin_unlock_irqrestore(&cmdq_hwq->lock, flags);808}809*num_wait += wait_cmds;810return rc;811}812813/* SP - CREQ Completion handlers */814static void bnxt_qplib_service_creq(unsigned long data)815{816struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;817struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;818struct bnxt_qplib_res *res;819u32 type, budget = CREQ_ENTRY_POLL_BUDGET;820struct bnxt_qplib_hwq *creq_hwq = &creq->hwq;821struct creq_base *creqe;822struct pci_dev *pdev;823unsigned long flags;824u32 num_wakeup = 0;825int rc;826827pdev = rcfw->pdev;828res = rcfw->res;829/* Service the CREQ until empty */830spin_lock_irqsave(&creq_hwq->lock, flags);831while (budget > 0) {832if (RCFW_NO_FW_ACCESS(rcfw)) {833spin_unlock_irqrestore(&creq_hwq->lock, flags);834return;835}836creqe = bnxt_qplib_get_qe(creq_hwq, creq_hwq->cons, NULL);837if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))838break;839/* The valid test of the entry must be done first before840* 
reading any further.841*/842dma_rmb();843type = creqe->type & CREQ_BASE_TYPE_MASK;844rcfw->cmdq.last_seen = jiffies;845846switch (type) {847case CREQ_BASE_TYPE_QP_EVENT:848bnxt_qplib_process_qp_event849(rcfw,(struct creq_qp_event *)creqe,850&num_wakeup);851creq->stats.creq_qp_event_processed++;852break;853case CREQ_BASE_TYPE_FUNC_EVENT:854rc = rcfw->creq.aeq_handler(rcfw, creqe, NULL);855if (rc)856dev_warn(&pdev->dev,857"QPLIB: async event type = 0x%x not handled",858type);859creq->stats.creq_func_event_processed++;860break;861default:862if (type != HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT) {863dev_warn(&pdev->dev,864"QPLIB: op_event = 0x%x not handled\n",865type);866}867break;868}869budget--;870bnxt_qplib_hwq_incr_cons(creq_hwq->max_elements, &creq_hwq->cons,8711, &creq->creq_db.dbinfo.flags);872}873if (budget == CREQ_ENTRY_POLL_BUDGET &&874!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags)) {875/* No completions received during this poll. Enable interrupt now */876bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);877creq->stats.creq_arm_count++;878dev_dbg(&pdev->dev, "QPLIB: Num of Func (0x%llx) \n",879creq->stats.creq_func_event_processed);880dev_dbg(&pdev->dev, "QPLIB: QP (0x%llx) events processed\n",881creq->stats.creq_qp_event_processed);882dev_dbg(&pdev->dev, "QPLIB: Armed:%#llx resched:%#llx \n",883creq->stats.creq_arm_count,884creq->stats.creq_tasklet_schedule_count);885} else if (creq->requested) {886/*887* Currently there is no bottom half implementation to process888* completions, all completions are processed in interrupt context889* only. 
So enable interrupts.890*/891bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);892creq->stats.creq_tasklet_schedule_count++;893}894spin_unlock_irqrestore(&creq_hwq->lock, flags);895if (num_wakeup)896wake_up_all(&rcfw->cmdq.waitq);897}898899static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)900{901struct bnxt_qplib_rcfw *rcfw = dev_instance;902903bnxt_qplib_service_creq((unsigned long)rcfw);904return IRQ_HANDLED;905}906907/* RCFW */908int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)909{910struct creq_deinitialize_fw_resp resp = {};911struct cmdq_deinitialize_fw req = {};912struct bnxt_qplib_cmdqmsg msg = {};913int rc;914915bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEINITIALIZE_FW,916sizeof(req));917bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,918sizeof(req), sizeof(resp), 0);919rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);920if (rc)921return rc;922clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);923return 0;924}925926int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int is_virtfn)927{928struct creq_initialize_fw_resp resp = {};929struct cmdq_initialize_fw req = {};930struct bnxt_qplib_cmdqmsg msg = {};931struct bnxt_qplib_chip_ctx *cctx;932struct bnxt_qplib_ctx *hctx;933struct bnxt_qplib_res *res;934struct bnxt_qplib_hwq *hwq;935int rc;936937res = rcfw->res;938cctx = res->cctx;939hctx = res->hctx;940941bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_INITIALIZE_FW,942sizeof(req));943/* Supply (log-base-2-of-host-page-size - base-page-shift)944* to bono to adjust the doorbell page sizes.945*/946req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -947RCFW_DBR_BASE_PAGE_SHIFT);948/*949* VFs need not setup the HW context area, PF950* shall setup this area for VF. 
Skipping the951* HW programming952*/953if (is_virtfn || _is_chip_gen_p5_p7(cctx))954goto skip_ctx_setup;955956hwq = &hctx->qp_ctx.hwq;957req.qpc_page_dir = cpu_to_le64(_get_base_addr(hwq));958req.number_of_qp = cpu_to_le32(hwq->max_elements);959req.qpc_pg_size_qpc_lvl = (_get_pte_pg_size(hwq) <<960CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |961(u8)hwq->level;962963hwq = &hctx->mrw_ctx.hwq;964req.mrw_page_dir = cpu_to_le64(_get_base_addr(hwq));965req.number_of_mrw = cpu_to_le32(hwq->max_elements);966req.mrw_pg_size_mrw_lvl = (_get_pte_pg_size(hwq) <<967CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT) |968(u8)hwq->level;969970hwq = &hctx->srq_ctx.hwq;971req.srq_page_dir = cpu_to_le64(_get_base_addr(hwq));972req.number_of_srq = cpu_to_le32(hwq->max_elements);973req.srq_pg_size_srq_lvl = (_get_pte_pg_size(hwq) <<974CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT) |975(u8)hwq->level;976977hwq = &hctx->cq_ctx.hwq;978req.cq_page_dir = cpu_to_le64(_get_base_addr(hwq));979req.number_of_cq = cpu_to_le32(hwq->max_elements);980req.cq_pg_size_cq_lvl = (_get_pte_pg_size(hwq) <<981CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT) |982(u8)hwq->level;983984hwq = &hctx->tim_ctx.hwq;985req.tim_page_dir = cpu_to_le64(_get_base_addr(hwq));986req.tim_pg_size_tim_lvl = (_get_pte_pg_size(hwq) <<987CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT) |988(u8)hwq->level;989hwq = &hctx->tqm_ctx.pde;990req.tqm_page_dir = cpu_to_le64(_get_base_addr(hwq));991req.tqm_pg_size_tqm_lvl = (_get_pte_pg_size(hwq) <<992CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT) |993(u8)hwq->level;994skip_ctx_setup:995if (BNXT_RE_HW_RETX(res->dattr->dev_cap_flags))996req.flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;997req.stat_ctx_id = cpu_to_le32(hctx->stats.fw_id);998bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,999sizeof(req), sizeof(resp), 0);1000rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);1001if (rc)1002return rc;1003set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);10041005return 0;1006}10071008void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_res 
*res)1009{1010struct bnxt_qplib_rcfw *rcfw = res->rcfw;10111012vfree(rcfw->rcfw_lat_slab_msec);1013rcfw->rcfw_lat_slab_msec = NULL;1014vfree(rcfw->qp_create_stats);1015rcfw->qp_create_stats = NULL;1016vfree(rcfw->qp_destroy_stats);1017rcfw->qp_destroy_stats = NULL;1018vfree(rcfw->mr_create_stats);1019rcfw->mr_create_stats = NULL;1020vfree(rcfw->mr_destroy_stats);1021rcfw->mr_destroy_stats = NULL;1022vfree(rcfw->qp_modify_stats);1023rcfw->qp_modify_stats = NULL;1024rcfw->sp_perf_stats_enabled = false;10251026kfree(rcfw->crsqe_tbl);1027rcfw->crsqe_tbl = NULL;10281029bnxt_qplib_free_hwq(res, &rcfw->cmdq.hwq);1030bnxt_qplib_free_hwq(res, &rcfw->creq.hwq);1031rcfw->pdev = NULL;1032}10331034int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res)1035{1036struct bnxt_qplib_hwq_attr hwq_attr = {};1037struct bnxt_qplib_rcfw *rcfw = res->rcfw;1038struct bnxt_qplib_sg_info sginfo = {};1039struct bnxt_qplib_cmdq_ctx *cmdq;1040struct bnxt_qplib_creq_ctx *creq;10411042rcfw->pdev = res->pdev;1043rcfw->res = res;1044cmdq = &rcfw->cmdq;1045creq = &rcfw->creq;10461047sginfo.pgsize = PAGE_SIZE;1048sginfo.pgshft = PAGE_SHIFT;10491050hwq_attr.sginfo = &sginfo;1051hwq_attr.res = rcfw->res;1052hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;1053hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;1054hwq_attr.type = _get_hwq_type(res);10551056if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {1057dev_err(&rcfw->pdev->dev,1058"QPLIB: HW channel CREQ allocation failed\n");1059return -ENOMEM;1060}10611062sginfo.pgsize = BNXT_QPLIB_CMDQE_PAGE_SIZE;1063hwq_attr.depth = BNXT_QPLIB_CMDQE_MAX_CNT & 0x7FFFFFFF;1064hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;1065hwq_attr.type = HWQ_TYPE_CTX;1066if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {1067dev_err(&rcfw->pdev->dev,1068"QPLIB: HW channel CMDQ allocation failed\n");1069goto fail_free_creq_hwq;1070}10711072rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,1073sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);1074if (!rcfw->crsqe_tbl) 
{1075dev_err(&rcfw->pdev->dev,1076"QPLIB: HW channel CRSQ allocation failed\n");1077goto fail_free_cmdq_hwq;1078}10791080rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;10811082rcfw->sp_perf_stats_enabled = false;1083rcfw->rcfw_lat_slab_msec = vzalloc(sizeof(u32) *1084RCFW_MAX_LATENCY_MSEC_SLAB_INDEX);1085rcfw->qp_create_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);1086rcfw->qp_destroy_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);1087rcfw->mr_create_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);1088rcfw->mr_destroy_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);1089rcfw->qp_modify_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);10901091if (rcfw->rcfw_lat_slab_msec &&1092rcfw->qp_create_stats &&1093rcfw->qp_destroy_stats &&1094rcfw->mr_create_stats &&1095rcfw->mr_destroy_stats &&1096rcfw->qp_modify_stats)1097rcfw->sp_perf_stats_enabled = true;10981099return 0;1100fail_free_cmdq_hwq:1101bnxt_qplib_free_hwq(res, &rcfw->cmdq.hwq);1102fail_free_creq_hwq:1103bnxt_qplib_free_hwq(res, &rcfw->creq.hwq);1104return -ENOMEM;1105}11061107void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)1108{1109struct bnxt_qplib_creq_ctx *creq;1110struct bnxt_qplib_res *res;11111112creq = &rcfw->creq;1113res = rcfw->res;11141115if (!creq->requested)1116return;11171118creq->requested = false;1119/* Mask h/w interrupts */1120bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, false);1121/* Sync with last running IRQ-handler */1122synchronize_irq(creq->msix_vec);1123free_irq(creq->msix_vec, rcfw);1124kfree(creq->irq_name);1125creq->irq_name = NULL;1126/* rcfw_intr_enabled should not be greater than 1. 
Debug1127* print to check if that is the case1128*/1129if (atomic_read(&rcfw->rcfw_intr_enabled) > 1) {1130dev_err(&rcfw->pdev->dev,1131"%s: rcfw->rcfw_intr_enabled = 0x%x\n", __func__,1132atomic_read(&rcfw->rcfw_intr_enabled));1133}1134atomic_set(&rcfw->rcfw_intr_enabled, 0);1135rcfw->num_irq_stopped++;1136}11371138void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)1139{1140struct bnxt_qplib_creq_ctx *creq;1141struct bnxt_qplib_cmdq_ctx *cmdq;11421143creq = &rcfw->creq;1144cmdq = &rcfw->cmdq;1145/* Make sure the HW channel is stopped! */1146bnxt_qplib_rcfw_stop_irq(rcfw, true);11471148creq->creq_db.reg.bar_reg = NULL;1149creq->creq_db.db = NULL;11501151if (cmdq->cmdq_mbox.reg.bar_reg) {1152iounmap(cmdq->cmdq_mbox.reg.bar_reg);1153cmdq->cmdq_mbox.reg.bar_reg = NULL;1154cmdq->cmdq_mbox.prod = NULL;1155cmdq->cmdq_mbox.db = NULL;1156}11571158creq->aeq_handler = NULL;1159creq->msix_vec = 0;1160}11611162int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,1163bool need_init)1164{1165struct bnxt_qplib_creq_ctx *creq;1166struct bnxt_qplib_res *res;1167int rc;11681169creq = &rcfw->creq;1170res = rcfw->res;11711172if (creq->requested)1173return -EFAULT;11741175creq->msix_vec = msix_vector;11761177creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s\n",1178pci_name(res->pdev));1179if (!creq->irq_name)1180return -ENOMEM;11811182rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,1183creq->irq_name, rcfw);1184if (rc) {1185kfree(creq->irq_name);1186creq->irq_name = NULL;1187return rc;1188}1189creq->requested = true;11901191bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);11921193rcfw->num_irq_started++;1194/* Debug print to check rcfw interrupt enable/disable is invoked1195* out of sequence1196*/1197if (atomic_read(&rcfw->rcfw_intr_enabled) > 0) {1198dev_err(&rcfw->pdev->dev,1199"%s: rcfw->rcfw_intr_enabled = 0x%x\n", 
__func__,1200atomic_read(&rcfw->rcfw_intr_enabled));1201}1202atomic_inc(&rcfw->rcfw_intr_enabled);1203return 0;1204}12051206static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw)1207{1208struct bnxt_qplib_cmdq_mbox *mbox;1209resource_size_t bar_reg;1210struct pci_dev *pdev;12111212pdev = rcfw->pdev;1213mbox = &rcfw->cmdq.cmdq_mbox;12141215mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;1216mbox->reg.len = RCFW_COMM_SIZE;1217mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);1218if (!mbox->reg.bar_base) {1219dev_err(&pdev->dev,1220"QPLIB: CMDQ BAR region %d resc start is 0!\n",1221mbox->reg.bar_id);1222return -ENOMEM;1223}12241225bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;1226mbox->reg.len = RCFW_COMM_SIZE;1227mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);1228if (!mbox->reg.bar_reg) {1229dev_err(&pdev->dev,1230"QPLIB: CMDQ BAR region %d mapping failed\n",1231mbox->reg.bar_id);1232return -ENOMEM;1233}12341235mbox->prod = (void __iomem *)((char *)mbox->reg.bar_reg +1236RCFW_PF_VF_COMM_PROD_OFFSET);1237mbox->db = (void __iomem *)((char *)mbox->reg.bar_reg +1238RCFW_COMM_TRIG_OFFSET);1239return 0;1240}12411242static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)1243{1244struct bnxt_qplib_creq_db *creq_db;1245struct bnxt_qplib_reg_desc *dbreg;1246struct bnxt_qplib_res *res;12471248res = rcfw->res;1249creq_db = &rcfw->creq.creq_db;1250dbreg = &res->dpi_tbl.ucreg;12511252creq_db->reg.bar_id = dbreg->bar_id;1253creq_db->reg.bar_base = dbreg->bar_base;1254creq_db->reg.bar_reg = dbreg->bar_reg + reg_offt;1255creq_db->reg.len = _is_chip_gen_p5_p7(res->cctx) ? 
sizeof(u64) :1256sizeof(u32);12571258creq_db->dbinfo.db = creq_db->reg.bar_reg;1259creq_db->dbinfo.hwq = &rcfw->creq.hwq;1260creq_db->dbinfo.xid = rcfw->creq.ring_id;1261creq_db->dbinfo.seed = rcfw->creq.ring_id;1262creq_db->dbinfo.flags = 0;1263spin_lock_init(&creq_db->dbinfo.lock);1264creq_db->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;1265creq_db->dbinfo.res = rcfw->res;12661267return 0;1268}12691270static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)1271{1272struct bnxt_qplib_cmdq_ctx *cmdq;1273struct bnxt_qplib_creq_ctx *creq;1274struct bnxt_qplib_cmdq_mbox *mbox;1275struct cmdq_init init = {0};12761277cmdq = &rcfw->cmdq;1278creq = &rcfw->creq;1279mbox = &cmdq->cmdq_mbox;12801281init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);1282init.cmdq_size_cmdq_lvl = cpu_to_le16(1283((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &1284CMDQ_INIT_CMDQ_SIZE_MASK) |1285((cmdq->hwq.level << CMDQ_INIT_CMDQ_LVL_SFT) &1286CMDQ_INIT_CMDQ_LVL_MASK));1287init.creq_ring_id = cpu_to_le16(creq->ring_id);1288/* Write to the Bono mailbox register */1289__iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);1290}12911292int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,1293int msix_vector,1294int cp_bar_reg_off,1295aeq_handler_t aeq_handler)1296{1297struct bnxt_qplib_cmdq_ctx *cmdq;1298struct bnxt_qplib_creq_ctx *creq;1299int rc;13001301cmdq = &rcfw->cmdq;1302creq = &rcfw->creq;13031304/* Clear to defaults */1305cmdq->seq_num = 0;1306set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);1307init_waitqueue_head(&cmdq->waitq);13081309creq->stats.creq_qp_event_processed = 0;1310creq->stats.creq_func_event_processed = 0;1311creq->aeq_handler = aeq_handler;13121313rc = bnxt_qplib_map_cmdq_mbox(rcfw);1314if (rc)1315return rc;13161317rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);1318if (rc)1319return rc;13201321rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);1322if (rc) {1323dev_err(&rcfw->pdev->dev,1324"QPLIB: Failed to request IRQ 
for CREQ rc = 0x%x\n", rc);1325bnxt_qplib_disable_rcfw_channel(rcfw);1326return rc;1327}13281329rcfw->curr_shadow_qd = min_not_zero(cmdq_shadow_qd,1330(unsigned int)RCFW_CMD_NON_BLOCKING_SHADOW_QD);1331sema_init(&rcfw->rcfw_inflight, rcfw->curr_shadow_qd);1332dev_dbg(&rcfw->pdev->dev,1333"Perf Debug: shadow qd %d\n", rcfw->curr_shadow_qd);1334bnxt_qplib_start_rcfw(rcfw);13351336return 0;1337}133813391340