Path: blob/master/drivers/infiniband/hw/cxgb3/iwch_cm.c
15112 views
/*1* Copyright (c) 2006 Chelsio, Inc. All rights reserved.2*3* This software is available to you under a choice of one of two4* licenses. You may choose to be licensed under the terms of the GNU5* General Public License (GPL) Version 2, available from the file6* COPYING in the main directory of this source tree, or the7* OpenIB.org BSD license below:8*9* Redistribution and use in source and binary forms, with or10* without modification, are permitted provided that the following11* conditions are met:12*13* - Redistributions of source code must retain the above14* copyright notice, this list of conditions and the following15* disclaimer.16*17* - Redistributions in binary form must reproduce the above18* copyright notice, this list of conditions and the following19* disclaimer in the documentation and/or other materials20* provided with the distribution.21*22* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,23* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF24* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND25* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS26* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN27* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN28* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE29* SOFTWARE.30*/31#include <linux/module.h>32#include <linux/list.h>33#include <linux/slab.h>34#include <linux/workqueue.h>35#include <linux/skbuff.h>36#include <linux/timer.h>37#include <linux/notifier.h>38#include <linux/inetdevice.h>3940#include <net/neighbour.h>41#include <net/netevent.h>42#include <net/route.h>4344#include "tcb.h"45#include "cxgb3_offload.h"46#include "iwch.h"47#include "iwch_provider.h"48#include "iwch_cm.h"4950static char *states[] = {51"idle",52"listen",53"connecting",54"mpa_wait_req",55"mpa_req_sent",56"mpa_req_rcvd",57"mpa_rep_sent",58"fpdu_mode",59"aborting",60"closing",61"moribund",62"dead",63NULL,64};6566int peer2peer = 0;67module_param(peer2peer, int, 0644);68MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");6970static int ep_timeout_secs = 60;71module_param(ep_timeout_secs, int, 0644);72MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "73"in seconds (default=60)");7475static int mpa_rev = 1;76module_param(mpa_rev, int, 0644);77MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "78"1 is spec compliant. (default=1)");7980static int markers_enabled = 0;81module_param(markers_enabled, int, 0644);82MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");8384static int crc_enabled = 1;85module_param(crc_enabled, int, 0644);86MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");8788static int rcv_win = 256 * 1024;89module_param(rcv_win, int, 0644);90MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256)");9192static int snd_win = 32 * 1024;93module_param(snd_win, int, 0644);94MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");9596static unsigned int nocong = 0;97module_param(nocong, uint, 0644);98MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");99100static unsigned int cong_flavor = 1;101module_param(cong_flavor, uint, 0644);102MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");103104static struct workqueue_struct *workq;105106static struct sk_buff_head rxq;107108static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);109static void ep_timeout(unsigned long arg);110static void connect_reply_upcall(struct iwch_ep *ep, int status);111112static void start_ep_timer(struct iwch_ep *ep)113{114PDBG("%s ep %p\n", __func__, ep);115if (timer_pending(&ep->timer)) {116PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);117del_timer_sync(&ep->timer);118} else119get_ep(&ep->com);120ep->timer.expires = jiffies + ep_timeout_secs * HZ;121ep->timer.data = (unsigned long)ep;122ep->timer.function = ep_timeout;123add_timer(&ep->timer);124}125126static void stop_ep_timer(struct iwch_ep *ep)127{128PDBG("%s ep %p\n", __func__, ep);129if (!timer_pending(&ep->timer)) {130printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",131__func__, ep, ep->com.state);132WARN_ON(1);133return;134}135del_timer_sync(&ep->timer);136put_ep(&ep->com);137}138139static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)140{141int error = 0;142struct cxio_rdev *rdev;143144rdev = (struct cxio_rdev *)tdev->ulp;145if (cxio_fatal_error(rdev)) {146kfree_skb(skb);147return -EIO;148}149error = l2t_send(tdev, skb, l2e);150if (error < 0)151kfree_skb(skb);152return error;153}154155int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)156{157int error = 0;158struct cxio_rdev *rdev;159160rdev = (struct cxio_rdev *)tdev->ulp;161if (cxio_fatal_error(rdev)) {162kfree_skb(skb);163return -EIO;164}165error = cxgb3_ofld_send(tdev, skb);166if (error < 0)167kfree_skb(skb);168return error;169}170171static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)172{173struct cpl_tid_release *req;174175skb = get_skb(skb, sizeof *req, GFP_KERNEL);176if (!skb)177return;178req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));179req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));180OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));181skb->priority = CPL_PRIORITY_SETUP;182iwch_cxgb3_ofld_send(tdev, skb);183return;184}185186int iwch_quiesce_tid(struct iwch_ep *ep)187{188struct cpl_set_tcb_field *req;189struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);190191if (!skb)192return -ENOMEM;193req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));194req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));195req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));196OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));197req->reply = 0;198req->cpu_idx = 0;199req->word = htons(W_TCB_RX_QUIESCE);200req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);201req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);202203skb->priority = CPL_PRIORITY_DATA;204return iwch_cxgb3_ofld_send(ep->com.tdev, skb);205}206207int iwch_resume_tid(struct iwch_ep *ep)208{209struct cpl_set_tcb_field *req;210struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);211212if (!skb)213return -ENOMEM;214req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));215req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));216req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));217OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));218req->reply = 0;219req->cpu_idx = 0;220req->word = htons(W_TCB_RX_QUIESCE);221req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);222req->val = 0;223224skb->priority = CPL_PRIORITY_DATA;225return iwch_cxgb3_ofld_send(ep->com.tdev, skb);226}227228static void set_emss(struct iwch_ep *ep, u16 opt)229{230PDBG("%s ep %p opt %u\n", __func__, ep, opt);231ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;232if (G_TCPOPT_TSTAMP(opt))233ep->emss -= 12;234if (ep->emss < 128)235ep->emss = 128;236PDBG("emss=%d\n", ep->emss);237}238239static enum iwch_ep_state state_read(struct iwch_ep_common *epc)240{241unsigned long flags;242enum iwch_ep_state state;243244spin_lock_irqsave(&epc->lock, flags);245state = epc->state;246spin_unlock_irqrestore(&epc->lock, flags);247return state;248}249250static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)251{252epc->state = new;253}254255static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)256{257unsigned long flags;258259spin_lock_irqsave(&epc->lock, flags);260PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);261__state_set(epc, new);262spin_unlock_irqrestore(&epc->lock, flags);263return;264}265266static void *alloc_ep(int size, gfp_t gfp)267{268struct iwch_ep_common *epc;269270epc = kzalloc(size, gfp);271if (epc) {272kref_init(&epc->kref);273spin_lock_init(&epc->lock);274init_waitqueue_head(&epc->waitq);275}276PDBG("%s alloc ep %p\n", __func__, epc);277return epc;278}279280void __free_ep(struct kref *kref)281{282struct iwch_ep *ep;283ep = container_of(container_of(kref, struct iwch_ep_common, kref),284struct iwch_ep, com);285PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);286if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {287cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);288dst_release(ep->dst);289l2t_release(L2DATA(ep->com.tdev), ep->l2t);290}291kfree(ep);292}293294static void release_ep_resources(struct iwch_ep *ep)295{296PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);297set_bit(RELEASE_RESOURCES, &ep->com.flags);298put_ep(&ep->com);299}300301static int status2errno(int status)302{303switch (status) {304case CPL_ERR_NONE:305return 0;306case CPL_ERR_CONN_RESET:307return -ECONNRESET;308case CPL_ERR_ARP_MISS:309return -EHOSTUNREACH;310case CPL_ERR_CONN_TIMEDOUT:311return -ETIMEDOUT;312case CPL_ERR_TCAM_FULL:313return -ENOMEM;314case CPL_ERR_CONN_EXIST:315return -EADDRINUSE;316default:317return -EIO;318}319}320321/*322* Try and reuse skbs already allocated...323*/324static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)325{326if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {327skb_trim(skb, 0);328skb_get(skb);329} else {330skb = alloc_skb(len, gfp);331}332return skb;333}334335static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,336__be32 peer_ip, __be16 local_port,337__be16 peer_port, u8 tos)338{339struct rtable *rt;340struct flowi4 fl4;341342rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,343peer_port, local_port, IPPROTO_TCP,344tos, 0);345if (IS_ERR(rt))346return NULL;347return rt;348}349350static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)351{352int i = 0;353354while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)355++i;356return i;357}358359static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)360{361PDBG("%s t3cdev %p\n", __func__, dev);362kfree_skb(skb);363}364365/*366* Handle an ARP failure for an active open.367*/368static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)369{370printk(KERN_ERR MOD "ARP failure duing connect\n");371kfree_skb(skb);372}373374/*375* Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant376* and send it along.377*/378static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)379{380struct cpl_abort_req *req = cplhdr(skb);381382PDBG("%s t3cdev %p\n", __func__, dev);383req->cmd = CPL_ABORT_NO_RST;384iwch_cxgb3_ofld_send(dev, skb);385}386387static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)388{389struct cpl_close_con_req *req;390struct sk_buff *skb;391392PDBG("%s ep %p\n", __func__, ep);393skb = get_skb(NULL, sizeof(*req), gfp);394if (!skb) {395printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);396return -ENOMEM;397}398skb->priority = CPL_PRIORITY_DATA;399set_arp_failure_handler(skb, arp_failure_discard);400req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));401req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));402req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));403OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));404return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);405}406407static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)408{409struct cpl_abort_req *req;410411PDBG("%s ep %p\n", __func__, ep);412skb = get_skb(skb, sizeof(*req), gfp);413if (!skb) {414printk(KERN_ERR MOD "%s - failed to alloc skb.\n",415__func__);416return -ENOMEM;417}418skb->priority = CPL_PRIORITY_DATA;419set_arp_failure_handler(skb, abort_arp_failure);420req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));421req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));422req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));423OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));424req->cmd = CPL_ABORT_SEND_RST;425return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);426}427428static int send_connect(struct iwch_ep *ep)429{430struct cpl_act_open_req *req;431struct sk_buff *skb;432u32 opt0h, opt0l, opt2;433unsigned int mtu_idx;434int wscale;435436PDBG("%s ep %p\n", __func__, ep);437438skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);439if (!skb) {440printk(KERN_ERR MOD "%s - failed to alloc skb.\n",441__func__);442return -ENOMEM;443}444mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));445wscale = compute_wscale(rcv_win);446opt0h = V_NAGLE(0) |447V_NO_CONG(nocong) |448V_KEEP_ALIVE(1) |449F_TCAM_BYPASS |450V_WND_SCALE(wscale) |451V_MSS_IDX(mtu_idx) |452V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);453opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);454opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |455V_CONG_CONTROL_FLAVOR(cong_flavor);456skb->priority = CPL_PRIORITY_SETUP;457set_arp_failure_handler(skb, act_open_req_arp_failure);458459req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));460req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));461OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));462req->local_port = ep->com.local_addr.sin_port;463req->peer_port = ep->com.remote_addr.sin_port;464req->local_ip = ep->com.local_addr.sin_addr.s_addr;465req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;466req->opt0h = htonl(opt0h);467req->opt0l = htonl(opt0l);468req->params = 0;469req->opt2 = htonl(opt2);470return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);471}472473static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)474{475int mpalen;476struct tx_data_wr *req;477struct mpa_message *mpa;478int len;479480PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);481482BUG_ON(skb_cloned(skb));483484mpalen = sizeof(*mpa) + ep->plen;485if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {486kfree_skb(skb);487skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);488if (!skb) {489connect_reply_upcall(ep, -ENOMEM);490return;491}492}493skb_trim(skb, 0);494skb_reserve(skb, sizeof(*req));495skb_put(skb, mpalen);496skb->priority = CPL_PRIORITY_DATA;497mpa = (struct mpa_message *) skb->data;498memset(mpa, 0, sizeof(*mpa));499memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));500mpa->flags = (crc_enabled ? MPA_CRC : 0) |501(markers_enabled ? MPA_MARKERS : 0);502mpa->private_data_size = htons(ep->plen);503mpa->revision = mpa_rev;504505if (ep->plen)506memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);507508/*509* Reference the mpa skb. This ensures the data area510* will remain in memory until the hw acks the tx.511* Function tx_ack() will deref it.512*/513skb_get(skb);514set_arp_failure_handler(skb, arp_failure_discard);515skb_reset_transport_header(skb);516len = skb->len;517req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));518req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);519req->wr_lo = htonl(V_WR_TID(ep->hwtid));520req->len = htonl(len);521req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |522V_TX_SNDBUF(snd_win>>15));523req->flags = htonl(F_TX_INIT);524req->sndseq = htonl(ep->snd_seq);525BUG_ON(ep->mpa_skb);526ep->mpa_skb = skb;527iwch_l2t_send(ep->com.tdev, skb, ep->l2t);528start_ep_timer(ep);529state_set(&ep->com, MPA_REQ_SENT);530return;531}532533static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)534{535int mpalen;536struct tx_data_wr *req;537struct mpa_message *mpa;538struct sk_buff *skb;539540PDBG("%s ep %p plen %d\n", __func__, ep, plen);541542mpalen = sizeof(*mpa) + plen;543544skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);545if (!skb) {546printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);547return -ENOMEM;548}549skb_reserve(skb, sizeof(*req));550mpa = (struct mpa_message *) skb_put(skb, mpalen);551memset(mpa, 0, sizeof(*mpa));552memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));553mpa->flags = MPA_REJECT;554mpa->revision = mpa_rev;555mpa->private_data_size = htons(plen);556if (plen)557memcpy(mpa->private_data, pdata, plen);558559/*560* Reference the mpa skb again. This ensures the data area561* will remain in memory until the hw acks the tx.562* Function tx_ack() will deref it.563*/564skb_get(skb);565skb->priority = CPL_PRIORITY_DATA;566set_arp_failure_handler(skb, arp_failure_discard);567skb_reset_transport_header(skb);568req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));569req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);570req->wr_lo = htonl(V_WR_TID(ep->hwtid));571req->len = htonl(mpalen);572req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |573V_TX_SNDBUF(snd_win>>15));574req->flags = htonl(F_TX_INIT);575req->sndseq = htonl(ep->snd_seq);576BUG_ON(ep->mpa_skb);577ep->mpa_skb = skb;578return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);579}580581static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)582{583int mpalen;584struct tx_data_wr *req;585struct mpa_message *mpa;586int len;587struct sk_buff *skb;588589PDBG("%s ep %p plen %d\n", __func__, ep, plen);590591mpalen = sizeof(*mpa) + plen;592593skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);594if (!skb) {595printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);596return -ENOMEM;597}598skb->priority = CPL_PRIORITY_DATA;599skb_reserve(skb, sizeof(*req));600mpa = (struct mpa_message *) skb_put(skb, mpalen);601memset(mpa, 0, sizeof(*mpa));602memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));603mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |604(markers_enabled ? MPA_MARKERS : 0);605mpa->revision = mpa_rev;606mpa->private_data_size = htons(plen);607if (plen)608memcpy(mpa->private_data, pdata, plen);609610/*611* Reference the mpa skb. This ensures the data area612* will remain in memory until the hw acks the tx.613* Function tx_ack() will deref it.614*/615skb_get(skb);616set_arp_failure_handler(skb, arp_failure_discard);617skb_reset_transport_header(skb);618len = skb->len;619req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));620req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);621req->wr_lo = htonl(V_WR_TID(ep->hwtid));622req->len = htonl(len);623req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |624V_TX_SNDBUF(snd_win>>15));625req->flags = htonl(F_TX_INIT);626req->sndseq = htonl(ep->snd_seq);627ep->mpa_skb = skb;628state_set(&ep->com, MPA_REP_SENT);629return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);630}631632static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)633{634struct iwch_ep *ep = ctx;635struct cpl_act_establish *req = cplhdr(skb);636unsigned int tid = GET_TID(req);637638PDBG("%s ep %p tid %d\n", __func__, ep, tid);639640dst_confirm(ep->dst);641642/* setup the hwtid for this connection */643ep->hwtid = tid;644cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);645646ep->snd_seq = ntohl(req->snd_isn);647ep->rcv_seq = ntohl(req->rcv_isn);648649set_emss(ep, ntohs(req->tcp_opt));650651/* dealloc the atid */652cxgb3_free_atid(ep->com.tdev, ep->atid);653654/* start MPA negotiation */655send_mpa_req(ep, skb);656657return 0;658}659660static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)661{662PDBG("%s ep %p\n", __FILE__, ep);663state_set(&ep->com, ABORTING);664send_abort(ep, skb, gfp);665}666667static void close_complete_upcall(struct iwch_ep *ep)668{669struct iw_cm_event event;670671PDBG("%s ep %p\n", __func__, ep);672memset(&event, 0, sizeof(event));673event.event = IW_CM_EVENT_CLOSE;674if (ep->com.cm_id) {675PDBG("close complete delivered ep %p cm_id %p tid %d\n",676ep, ep->com.cm_id, ep->hwtid);677ep->com.cm_id->event_handler(ep->com.cm_id, &event);678ep->com.cm_id->rem_ref(ep->com.cm_id);679ep->com.cm_id = NULL;680ep->com.qp = NULL;681}682}683684static void peer_close_upcall(struct iwch_ep *ep)685{686struct iw_cm_event event;687688PDBG("%s ep %p\n", __func__, ep);689memset(&event, 0, sizeof(event));690event.event = IW_CM_EVENT_DISCONNECT;691if (ep->com.cm_id) {692PDBG("peer close delivered ep %p cm_id %p tid %d\n",693ep, ep->com.cm_id, ep->hwtid);694ep->com.cm_id->event_handler(ep->com.cm_id, &event);695}696}697698static void peer_abort_upcall(struct iwch_ep *ep)699{700struct iw_cm_event event;701702PDBG("%s ep %p\n", __func__, ep);703memset(&event, 0, sizeof(event));704event.event = IW_CM_EVENT_CLOSE;705event.status = -ECONNRESET;706if (ep->com.cm_id) {707PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,708ep->com.cm_id, ep->hwtid);709ep->com.cm_id->event_handler(ep->com.cm_id, &event);710ep->com.cm_id->rem_ref(ep->com.cm_id);711ep->com.cm_id = NULL;712ep->com.qp = NULL;713}714}715716static void connect_reply_upcall(struct iwch_ep *ep, int status)717{718struct iw_cm_event event;719720PDBG("%s ep %p status %d\n", __func__, ep, status);721memset(&event, 0, sizeof(event));722event.event = IW_CM_EVENT_CONNECT_REPLY;723event.status = status;724event.local_addr = ep->com.local_addr;725event.remote_addr = ep->com.remote_addr;726727if ((status == 0) || (status == -ECONNREFUSED)) {728event.private_data_len = ep->plen;729event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);730}731if (ep->com.cm_id) {732PDBG("%s ep %p tid %d status %d\n", __func__, ep,733ep->hwtid, status);734ep->com.cm_id->event_handler(ep->com.cm_id, &event);735}736if (status < 0) {737ep->com.cm_id->rem_ref(ep->com.cm_id);738ep->com.cm_id = NULL;739ep->com.qp = NULL;740}741}742743static void connect_request_upcall(struct iwch_ep *ep)744{745struct iw_cm_event event;746747PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);748memset(&event, 0, sizeof(event));749event.event = IW_CM_EVENT_CONNECT_REQUEST;750event.local_addr = ep->com.local_addr;751event.remote_addr = ep->com.remote_addr;752event.private_data_len = ep->plen;753event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);754event.provider_data = ep;755if (state_read(&ep->parent_ep->com) != DEAD) {756get_ep(&ep->com);757ep->parent_ep->com.cm_id->event_handler(758ep->parent_ep->com.cm_id,759&event);760}761put_ep(&ep->parent_ep->com);762ep->parent_ep = NULL;763}764765static void established_upcall(struct iwch_ep *ep)766{767struct iw_cm_event event;768769PDBG("%s ep %p\n", __func__, ep);770memset(&event, 0, sizeof(event));771event.event = IW_CM_EVENT_ESTABLISHED;772if (ep->com.cm_id) {773PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);774ep->com.cm_id->event_handler(ep->com.cm_id, &event);775}776}777778static int update_rx_credits(struct iwch_ep *ep, u32 credits)779{780struct cpl_rx_data_ack *req;781struct sk_buff *skb;782783PDBG("%s ep %p credits %u\n", __func__, ep, credits);784skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);785if (!skb) {786printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");787return 0;788}789790req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));791req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));792OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));793req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));794skb->priority = CPL_PRIORITY_ACK;795iwch_cxgb3_ofld_send(ep->com.tdev, skb);796return credits;797}798799static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)800{801struct mpa_message *mpa;802u16 plen;803struct iwch_qp_attributes attrs;804enum iwch_qp_attr_mask mask;805int err;806807PDBG("%s ep %p\n", __func__, ep);808809/*810* Stop mpa timer. If it expired, then the state has811* changed and we bail since ep_timeout already aborted812* the connection.813*/814stop_ep_timer(ep);815if (state_read(&ep->com) != MPA_REQ_SENT)816return;817818/*819* If we get more than the supported amount of private data820* then we must fail this connection.821*/822if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {823err = -EINVAL;824goto err;825}826827/*828* copy the new data into our accumulation buffer.829*/830skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),831skb->len);832ep->mpa_pkt_len += skb->len;833834/*835* if we don't even have the mpa message, then bail.836*/837if (ep->mpa_pkt_len < sizeof(*mpa))838return;839mpa = (struct mpa_message *) ep->mpa_pkt;840841/* Validate MPA header. */842if (mpa->revision != mpa_rev) {843err = -EPROTO;844goto err;845}846if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {847err = -EPROTO;848goto err;849}850851plen = ntohs(mpa->private_data_size);852853/*854* Fail if there's too much private data.855*/856if (plen > MPA_MAX_PRIVATE_DATA) {857err = -EPROTO;858goto err;859}860861/*862* If plen does not account for pkt size863*/864if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {865err = -EPROTO;866goto err;867}868869ep->plen = (u8) plen;870871/*872* If we don't have all the pdata yet, then bail.873* We'll continue process when more data arrives.874*/875if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))876return;877878if (mpa->flags & MPA_REJECT) {879err = -ECONNREFUSED;880goto err;881}882883/*884* If we get here we have accumulated the entire mpa885* start reply message including private data. And886* the MPA header is valid.887*/888state_set(&ep->com, FPDU_MODE);889ep->mpa_attr.initiator = 1;890ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;891ep->mpa_attr.recv_marker_enabled = markers_enabled;892ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;893ep->mpa_attr.version = mpa_rev;894PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "895"xmit_marker_enabled=%d, version=%d\n", __func__,896ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,897ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);898899attrs.mpa_attr = ep->mpa_attr;900attrs.max_ird = ep->ird;901attrs.max_ord = ep->ord;902attrs.llp_stream_handle = ep;903attrs.next_state = IWCH_QP_STATE_RTS;904905mask = IWCH_QP_ATTR_NEXT_STATE |906IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |907IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;908909/* bind QP and TID with INIT_WR */910err = iwch_modify_qp(ep->com.qp->rhp,911ep->com.qp, mask, &attrs, 1);912if (err)913goto err;914915if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {916iwch_post_zb_read(ep);917}918919goto out;920err:921abort_connection(ep, skb, GFP_KERNEL);922out:923connect_reply_upcall(ep, err);924return;925}926927static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)928{929struct mpa_message *mpa;930u16 plen;931932PDBG("%s ep %p\n", __func__, ep);933934/*935* Stop mpa timer. If it expired, then the state has936* changed and we bail since ep_timeout already aborted937* the connection.938*/939stop_ep_timer(ep);940if (state_read(&ep->com) != MPA_REQ_WAIT)941return;942943/*944* If we get more than the supported amount of private data945* then we must fail this connection.946*/947if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {948abort_connection(ep, skb, GFP_KERNEL);949return;950}951952PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);953954/*955* Copy the new data into our accumulation buffer.956*/957skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),958skb->len);959ep->mpa_pkt_len += skb->len;960961/*962* If we don't even have the mpa message, then bail.963* We'll continue process when more data arrives.964*/965if (ep->mpa_pkt_len < sizeof(*mpa))966return;967PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);968mpa = (struct mpa_message *) ep->mpa_pkt;969970/*971* Validate MPA Header.972*/973if (mpa->revision != mpa_rev) {974abort_connection(ep, skb, GFP_KERNEL);975return;976}977978if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {979abort_connection(ep, skb, GFP_KERNEL);980return;981}982983plen = ntohs(mpa->private_data_size);984985/*986* Fail if there's too much private data.987*/988if (plen > MPA_MAX_PRIVATE_DATA) {989abort_connection(ep, skb, GFP_KERNEL);990return;991}992993/*994* If plen does not account for pkt size995*/996if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {997abort_connection(ep, skb, GFP_KERNEL);998return;999}1000ep->plen = (u8) plen;10011002/*1003* If we don't have all the pdata yet, then bail.1004*/1005if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))1006return;10071008/*1009* If we get here we have accumulated the entire mpa1010* start reply message including private data.1011*/1012ep->mpa_attr.initiator = 0;1013ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;1014ep->mpa_attr.recv_marker_enabled = markers_enabled;1015ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;1016ep->mpa_attr.version = mpa_rev;1017PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "1018"xmit_marker_enabled=%d, version=%d\n", __func__,1019ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,1020ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);10211022state_set(&ep->com, MPA_REQ_RCVD);10231024/* drive upcall */1025connect_request_upcall(ep);1026return;1027}10281029static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1030{1031struct iwch_ep *ep = ctx;1032struct cpl_rx_data *hdr = cplhdr(skb);1033unsigned int dlen = ntohs(hdr->len);10341035PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);10361037skb_pull(skb, sizeof(*hdr));1038skb_trim(skb, dlen);10391040ep->rcv_seq += dlen;1041BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));10421043switch (state_read(&ep->com)) {1044case MPA_REQ_SENT:1045process_mpa_reply(ep, skb);1046break;1047case MPA_REQ_WAIT:1048process_mpa_request(ep, skb);1049break;1050case MPA_REP_SENT:1051break;1052default:1053printk(KERN_ERR MOD "%s Unexpected streaming data."1054" ep %p state %d tid %d\n",1055__func__, ep, state_read(&ep->com), ep->hwtid);10561057/*1058* The ep will timeout and inform the ULP of the failure.1059* See ep_timeout().1060*/1061break;1062}10631064/* update RX credits */1065update_rx_credits(ep, dlen);10661067return CPL_RET_BUF_DONE;1068}10691070/*1071* Upcall from the adapter indicating data has been transmitted.1072* For us its just the single MPA request or reply. We can now free1073* the skb holding the mpa message.1074*/1075static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1076{1077struct iwch_ep *ep = ctx;1078struct cpl_wr_ack *hdr = cplhdr(skb);1079unsigned int credits = ntohs(hdr->credits);1080unsigned long flags;1081int post_zb = 0;10821083PDBG("%s ep %p credits %u\n", __func__, ep, credits);10841085if (credits == 0) {1086PDBG("%s 0 credit ack ep %p state %u\n",1087__func__, ep, state_read(&ep->com));1088return CPL_RET_BUF_DONE;1089}10901091spin_lock_irqsave(&ep->com.lock, flags);1092BUG_ON(credits != 1);1093dst_confirm(ep->dst);1094if (!ep->mpa_skb) {1095PDBG("%s rdma_init wr_ack ep %p state %u\n",1096__func__, ep, ep->com.state);1097if (ep->mpa_attr.initiator) {1098PDBG("%s initiator ep %p state %u\n",1099__func__, ep, ep->com.state);1100if (peer2peer && ep->com.state == FPDU_MODE)1101post_zb = 1;1102} else {1103PDBG("%s responder ep %p state %u\n",1104__func__, ep, ep->com.state);1105if (ep->com.state == MPA_REQ_RCVD) {1106ep->com.rpl_done = 1;1107wake_up(&ep->com.waitq);1108}1109}1110} else {1111PDBG("%s lsm ack ep %p state %u freeing skb\n",1112__func__, ep, ep->com.state);1113kfree_skb(ep->mpa_skb);1114ep->mpa_skb = NULL;1115}1116spin_unlock_irqrestore(&ep->com.lock, flags);1117if (post_zb)1118iwch_post_zb_read(ep);1119return CPL_RET_BUF_DONE;1120}11211122static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1123{1124struct iwch_ep *ep = ctx;1125unsigned long flags;1126int release = 0;11271128PDBG("%s ep %p\n", __func__, ep);1129BUG_ON(!ep);11301131/*1132* We get 2 abort replies from the HW. The first one must1133* be ignored except for scribbling that we need one more.1134*/1135if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {1136return CPL_RET_BUF_DONE;1137}11381139spin_lock_irqsave(&ep->com.lock, flags);1140switch (ep->com.state) {1141case ABORTING:1142close_complete_upcall(ep);1143__state_set(&ep->com, DEAD);1144release = 1;1145break;1146default:1147printk(KERN_ERR "%s ep %p state %d\n",1148__func__, ep, ep->com.state);1149break;1150}1151spin_unlock_irqrestore(&ep->com.lock, flags);11521153if (release)1154release_ep_resources(ep);1155return CPL_RET_BUF_DONE;1156}11571158/*1159* Return whether a failed active open has allocated a TID1160*/1161static inline int act_open_has_tid(int status)1162{1163return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&1164status != CPL_ERR_ARP_MISS;1165}11661167static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1168{1169struct iwch_ep *ep = ctx;1170struct cpl_act_open_rpl *rpl = cplhdr(skb);11711172PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,1173status2errno(rpl->status));1174connect_reply_upcall(ep, status2errno(rpl->status));1175state_set(&ep->com, DEAD);1176if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))1177release_tid(ep->com.tdev, GET_TID(rpl), NULL);1178cxgb3_free_atid(ep->com.tdev, ep->atid);1179dst_release(ep->dst);1180l2t_release(L2DATA(ep->com.tdev), ep->l2t);1181put_ep(&ep->com);1182return CPL_RET_BUF_DONE;1183}11841185static int listen_start(struct iwch_listen_ep *ep)1186{1187struct sk_buff *skb;1188struct cpl_pass_open_req *req;11891190PDBG("%s ep %p\n", __func__, ep);1191skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);1192if (!skb) {1193printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");1194return -ENOMEM;1195}11961197req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));1198req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));1199OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));1200req->local_port = ep->com.local_addr.sin_port;1201req->local_ip = ep->com.local_addr.sin_addr.s_addr;1202req->peer_port = 0;1203req->peer_ip = 0;1204req->peer_netmask = 0;1205req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);1206req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));1207req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));12081209skb->priority = 1;1210return iwch_cxgb3_ofld_send(ep->com.tdev, skb);1211}12121213static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1214{1215struct iwch_listen_ep *ep = ctx;1216struct cpl_pass_open_rpl *rpl = cplhdr(skb);12171218PDBG("%s ep %p status %d error %d\n", __func__, ep,1219rpl->status, status2errno(rpl->status));1220ep->com.rpl_err = status2errno(rpl->status);1221ep->com.rpl_done = 1;1222wake_up(&ep->com.waitq);12231224return CPL_RET_BUF_DONE;1225}12261227static int listen_stop(struct iwch_listen_ep *ep)1228{1229struct sk_buff *skb;1230struct cpl_close_listserv_req *req;12311232PDBG("%s ep %p\n", __func__, ep);1233skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);1234if (!skb) {1235printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);1236return -ENOMEM;1237}1238req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));1239req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));1240req->cpu_idx = 0;1241OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));1242skb->priority = 1;1243return iwch_cxgb3_ofld_send(ep->com.tdev, skb);1244}12451246static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,1247void *ctx)1248{1249struct iwch_listen_ep *ep = ctx;1250struct cpl_close_listserv_rpl *rpl = cplhdr(skb);12511252PDBG("%s ep %p\n", __func__, ep);1253ep->com.rpl_err = status2errno(rpl->status);1254ep->com.rpl_done = 1;1255wake_up(&ep->com.waitq);1256return CPL_RET_BUF_DONE;1257}12581259static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)1260{1261struct cpl_pass_accept_rpl *rpl;1262unsigned int mtu_idx;1263u32 opt0h, opt0l, opt2;1264int wscale;12651266PDBG("%s ep %p\n", __func__, ep);1267BUG_ON(skb_cloned(skb));1268skb_trim(skb, sizeof(*rpl));1269skb_get(skb);1270mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));1271wscale = compute_wscale(rcv_win);1272opt0h = V_NAGLE(0) |1273V_NO_CONG(nocong) |1274V_KEEP_ALIVE(1) |1275F_TCAM_BYPASS |1276V_WND_SCALE(wscale) |1277V_MSS_IDX(mtu_idx) |1278V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);1279opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);1280opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |1281V_CONG_CONTROL_FLAVOR(cong_flavor);12821283rpl = cplhdr(skb);1284rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));1285OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));1286rpl->peer_ip = peer_ip;1287rpl->opt0h = htonl(opt0h);1288rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);1289rpl->opt2 = htonl(opt2);1290rpl->rsvd = rpl->opt2; /* workaround for HW bug */1291skb->priority = CPL_PRIORITY_SETUP;1292iwch_l2t_send(ep->com.tdev, skb, ep->l2t);12931294return;1295}12961297static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,1298struct sk_buff *skb)1299{1300PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,1301peer_ip);1302BUG_ON(skb_cloned(skb));1303skb_trim(skb, sizeof(struct cpl_tid_release));1304skb_get(skb);13051306if (tdev->type != T3A)1307release_tid(tdev, hwtid, skb);1308else {1309struct cpl_pass_accept_rpl *rpl;13101311rpl = cplhdr(skb);1312skb->priority = CPL_PRIORITY_SETUP;1313rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));1314OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,1315hwtid));1316rpl->peer_ip = peer_ip;1317rpl->opt0h = htonl(F_TCAM_BYPASS);1318rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);1319rpl->opt2 = 0;1320rpl->rsvd = rpl->opt2;1321iwch_cxgb3_ofld_send(tdev, skb);1322}1323}13241325static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1326{1327struct iwch_ep *child_ep, *parent_ep = ctx;1328struct cpl_pass_accept_req *req = cplhdr(skb);1329unsigned int hwtid = GET_TID(req);1330struct dst_entry *dst;1331struct l2t_entry *l2t;1332struct rtable *rt;1333struct iff_mac tim;13341335PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);13361337if (state_read(&parent_ep->com) != LISTEN) {1338printk(KERN_ERR "%s - listening ep not in LISTEN\n",1339__func__);1340goto reject;1341}13421343/*1344* Find the netdev for this connection request.1345*/1346tim.mac_addr = req->dst_mac;1347tim.vlan_tag = ntohs(req->vlan_tag);1348if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {1349printk(KERN_ERR "%s bad dst mac %pM\n",1350__func__, req->dst_mac);1351goto reject;1352}13531354/* Find output route */1355rt = find_route(tdev,1356req->local_ip,1357req->peer_ip,1358req->local_port,1359req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));1360if (!rt) {1361printk(KERN_ERR MOD "%s - failed to find dst entry!\n",1362__func__);1363goto reject;1364}1365dst = &rt->dst;1366l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);1367if (!l2t) {1368printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",1369__func__);1370dst_release(dst);1371goto reject;1372}1373child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);1374if (!child_ep) {1375printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",1376__func__);1377l2t_release(L2DATA(tdev), l2t);1378dst_release(dst);1379goto reject;1380}1381state_set(&child_ep->com, CONNECTING);1382child_ep->com.tdev = tdev;1383child_ep->com.cm_id = NULL;1384child_ep->com.local_addr.sin_family = PF_INET;1385child_ep->com.local_addr.sin_port = req->local_port;1386child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;1387child_ep->com.remote_addr.sin_family = PF_INET;1388child_ep->com.remote_addr.sin_port = req->peer_port;1389child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;1390get_ep(&parent_ep->com);1391child_ep->parent_ep = parent_ep;1392child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));1393child_ep->l2t = l2t;1394child_ep->dst = dst;1395child_ep->hwtid = hwtid;1396init_timer(&child_ep->timer);1397cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);1398accept_cr(child_ep, req->peer_ip, skb);1399goto out;1400reject:1401reject_cr(tdev, hwtid, req->peer_ip, skb);1402out:1403return CPL_RET_BUF_DONE;1404}14051406static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1407{1408struct iwch_ep *ep = ctx;1409struct cpl_pass_establish *req = cplhdr(skb);14101411PDBG("%s ep %p\n", __func__, ep);1412ep->snd_seq = ntohl(req->snd_isn);1413ep->rcv_seq = ntohl(req->rcv_isn);14141415set_emss(ep, ntohs(req->tcp_opt));14161417dst_confirm(ep->dst);1418state_set(&ep->com, MPA_REQ_WAIT);1419start_ep_timer(ep);14201421return CPL_RET_BUF_DONE;1422}14231424static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1425{1426struct iwch_ep *ep = ctx;1427struct iwch_qp_attributes attrs;1428unsigned long flags;1429int disconnect = 1;1430int release = 0;14311432PDBG("%s ep %p\n", __func__, ep);1433dst_confirm(ep->dst);14341435spin_lock_irqsave(&ep->com.lock, flags);1436switch (ep->com.state) {1437case MPA_REQ_WAIT:1438__state_set(&ep->com, CLOSING);1439break;1440case MPA_REQ_SENT:1441__state_set(&ep->com, CLOSING);1442connect_reply_upcall(ep, -ECONNRESET);1443break;1444case MPA_REQ_RCVD:14451446/*1447* We're gonna mark this puppy DEAD, but keep1448* the reference on it until the ULP accepts or1449* rejects the CR. Also wake up anyone waiting1450* in rdma connection migration (see iwch_accept_cr()).1451*/1452__state_set(&ep->com, CLOSING);1453ep->com.rpl_done = 1;1454ep->com.rpl_err = -ECONNRESET;1455PDBG("waking up ep %p\n", ep);1456wake_up(&ep->com.waitq);1457break;1458case MPA_REP_SENT:1459__state_set(&ep->com, CLOSING);1460ep->com.rpl_done = 1;1461ep->com.rpl_err = -ECONNRESET;1462PDBG("waking up ep %p\n", ep);1463wake_up(&ep->com.waitq);1464break;1465case FPDU_MODE:1466start_ep_timer(ep);1467__state_set(&ep->com, CLOSING);1468attrs.next_state = IWCH_QP_STATE_CLOSING;1469iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,1470IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);1471peer_close_upcall(ep);1472break;1473case ABORTING:1474disconnect = 0;1475break;1476case CLOSING:1477__state_set(&ep->com, MORIBUND);1478disconnect = 0;1479break;1480case MORIBUND:1481stop_ep_timer(ep);1482if (ep->com.cm_id && ep->com.qp) {1483attrs.next_state = IWCH_QP_STATE_IDLE;1484iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,1485IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);1486}1487close_complete_upcall(ep);1488__state_set(&ep->com, DEAD);1489release = 1;1490disconnect = 0;1491break;1492case DEAD:1493disconnect = 0;1494break;1495default:1496BUG_ON(1);1497}1498spin_unlock_irqrestore(&ep->com.lock, flags);1499if (disconnect)1500iwch_ep_disconnect(ep, 0, GFP_KERNEL);1501if (release)1502release_ep_resources(ep);1503return CPL_RET_BUF_DONE;1504}15051506/*1507* Returns whether an ABORT_REQ_RSS message is a negative advice.1508*/1509static int is_neg_adv_abort(unsigned int status)1510{1511return status == CPL_ERR_RTX_NEG_ADVICE ||1512status == CPL_ERR_PERSIST_NEG_ADVICE;1513}15141515static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1516{1517struct cpl_abort_req_rss *req = cplhdr(skb);1518struct iwch_ep *ep = ctx;1519struct cpl_abort_rpl *rpl;1520struct sk_buff *rpl_skb;1521struct iwch_qp_attributes attrs;1522int ret;1523int release = 0;1524unsigned long flags;15251526if (is_neg_adv_abort(req->status)) {1527PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,1528ep->hwtid);1529t3_l2t_send_event(ep->com.tdev, ep->l2t);1530return CPL_RET_BUF_DONE;1531}15321533/*1534* We get 2 peer aborts from the HW. The first one must1535* be ignored except for scribbling that we need one more.1536*/1537if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {1538return CPL_RET_BUF_DONE;1539}15401541spin_lock_irqsave(&ep->com.lock, flags);1542PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);1543switch (ep->com.state) {1544case CONNECTING:1545break;1546case MPA_REQ_WAIT:1547stop_ep_timer(ep);1548break;1549case MPA_REQ_SENT:1550stop_ep_timer(ep);1551connect_reply_upcall(ep, -ECONNRESET);1552break;1553case MPA_REP_SENT:1554ep->com.rpl_done = 1;1555ep->com.rpl_err = -ECONNRESET;1556PDBG("waking up ep %p\n", ep);1557wake_up(&ep->com.waitq);1558break;1559case MPA_REQ_RCVD:15601561/*1562* We're gonna mark this puppy DEAD, but keep1563* the reference on it until the ULP accepts or1564* rejects the CR. Also wake up anyone waiting1565* in rdma connection migration (see iwch_accept_cr()).1566*/1567ep->com.rpl_done = 1;1568ep->com.rpl_err = -ECONNRESET;1569PDBG("waking up ep %p\n", ep);1570wake_up(&ep->com.waitq);1571break;1572case MORIBUND:1573case CLOSING:1574stop_ep_timer(ep);1575/*FALLTHROUGH*/1576case FPDU_MODE:1577if (ep->com.cm_id && ep->com.qp) {1578attrs.next_state = IWCH_QP_STATE_ERROR;1579ret = iwch_modify_qp(ep->com.qp->rhp,1580ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,1581&attrs, 1);1582if (ret)1583printk(KERN_ERR MOD1584"%s - qp <- error failed!\n",1585__func__);1586}1587peer_abort_upcall(ep);1588break;1589case ABORTING:1590break;1591case DEAD:1592PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);1593spin_unlock_irqrestore(&ep->com.lock, flags);1594return CPL_RET_BUF_DONE;1595default:1596BUG_ON(1);1597break;1598}1599dst_confirm(ep->dst);1600if (ep->com.state != ABORTING) {1601__state_set(&ep->com, DEAD);1602release = 1;1603}1604spin_unlock_irqrestore(&ep->com.lock, flags);16051606rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);1607if (!rpl_skb) {1608printk(KERN_ERR MOD "%s - cannot allocate skb!\n",1609__func__);1610release = 1;1611goto out;1612}1613rpl_skb->priority = CPL_PRIORITY_DATA;1614rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));1615rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));1616rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));1617OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));1618rpl->cmd = CPL_ABORT_NO_RST;1619iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);1620out:1621if (release)1622release_ep_resources(ep);1623return CPL_RET_BUF_DONE;1624}16251626static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1627{1628struct iwch_ep *ep = ctx;1629struct iwch_qp_attributes attrs;1630unsigned long flags;1631int release = 0;16321633PDBG("%s ep %p\n", __func__, ep);1634BUG_ON(!ep);16351636/* The cm_id may be null if we failed to connect */1637spin_lock_irqsave(&ep->com.lock, flags);1638switch (ep->com.state) {1639case CLOSING:1640__state_set(&ep->com, MORIBUND);1641break;1642case MORIBUND:1643stop_ep_timer(ep);1644if ((ep->com.cm_id) && (ep->com.qp)) {1645attrs.next_state = IWCH_QP_STATE_IDLE;1646iwch_modify_qp(ep->com.qp->rhp,1647ep->com.qp,1648IWCH_QP_ATTR_NEXT_STATE,1649&attrs, 1);1650}1651close_complete_upcall(ep);1652__state_set(&ep->com, DEAD);1653release = 1;1654break;1655case ABORTING:1656case DEAD:1657break;1658default:1659BUG_ON(1);1660break;1661}1662spin_unlock_irqrestore(&ep->com.lock, flags);1663if (release)1664release_ep_resources(ep);1665return CPL_RET_BUF_DONE;1666}16671668/*1669* T3A does 3 things when a TERM is received:1670* 1) send up a CPL_RDMA_TERMINATE message with the TERM packet1671* 2) generate an async event on the QP with the TERMINATE opcode1672* 3) post a TERMINATE opcde cqe into the associated CQ.1673*1674* For (1), we save the message in the qp for later consumer consumption.1675* For (2), we move the QP into TERMINATE, post a QP event and disconnect.1676* For (3), we toss the CQE in cxio_poll_cq().1677*1678* terminate() handles case (1)...1679*/1680static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1681{1682struct iwch_ep *ep = ctx;16831684if (state_read(&ep->com) != FPDU_MODE)1685return CPL_RET_BUF_DONE;16861687PDBG("%s ep %p\n", __func__, ep);1688skb_pull(skb, sizeof(struct cpl_rdma_terminate));1689PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);1690skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,1691skb->len);1692ep->com.qp->attr.terminate_msg_len = skb->len;1693ep->com.qp->attr.is_terminate_local = 0;1694return CPL_RET_BUF_DONE;1695}16961697static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)1698{1699struct cpl_rdma_ec_status *rep = cplhdr(skb);1700struct iwch_ep *ep = ctx;17011702PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,1703rep->status);1704if (rep->status) {1705struct iwch_qp_attributes attrs;17061707printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",1708__func__, ep->hwtid);1709stop_ep_timer(ep);1710attrs.next_state = IWCH_QP_STATE_ERROR;1711iwch_modify_qp(ep->com.qp->rhp,1712ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,1713&attrs, 1);1714abort_connection(ep, NULL, GFP_KERNEL);1715}1716return CPL_RET_BUF_DONE;1717}17181719static void ep_timeout(unsigned long arg)1720{1721struct iwch_ep *ep = (struct iwch_ep *)arg;1722struct iwch_qp_attributes attrs;1723unsigned long flags;1724int abort = 1;17251726spin_lock_irqsave(&ep->com.lock, flags);1727PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,1728ep->com.state);1729switch (ep->com.state) {1730case MPA_REQ_SENT:1731__state_set(&ep->com, ABORTING);1732connect_reply_upcall(ep, -ETIMEDOUT);1733break;1734case MPA_REQ_WAIT:1735__state_set(&ep->com, ABORTING);1736break;1737case CLOSING:1738case MORIBUND:1739if (ep->com.cm_id && ep->com.qp) {1740attrs.next_state = IWCH_QP_STATE_ERROR;1741iwch_modify_qp(ep->com.qp->rhp,1742ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,1743&attrs, 1);1744}1745__state_set(&ep->com, ABORTING);1746break;1747default:1748printk(KERN_ERR "%s unexpected state ep %p state %u\n",1749__func__, ep, ep->com.state);1750WARN_ON(1);1751abort = 0;1752}1753spin_unlock_irqrestore(&ep->com.lock, flags);1754if (abort)1755abort_connection(ep, NULL, GFP_ATOMIC);1756put_ep(&ep->com);1757}17581759int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)1760{1761int err;1762struct iwch_ep *ep = to_ep(cm_id);1763PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);17641765if (state_read(&ep->com) == DEAD) {1766put_ep(&ep->com);1767return -ECONNRESET;1768}1769BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);1770if (mpa_rev == 0)1771abort_connection(ep, NULL, GFP_KERNEL);1772else {1773err = send_mpa_reject(ep, pdata, pdata_len);1774err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);1775}1776put_ep(&ep->com);1777return 0;1778}17791780int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)1781{1782int err;1783struct iwch_qp_attributes attrs;1784enum iwch_qp_attr_mask mask;1785struct iwch_ep *ep = to_ep(cm_id);1786struct iwch_dev *h = to_iwch_dev(cm_id->device);1787struct iwch_qp *qp = get_qhp(h, conn_param->qpn);17881789PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);1790if (state_read(&ep->com) == DEAD) {1791err = -ECONNRESET;1792goto err;1793}17941795BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);1796BUG_ON(!qp);17971798if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||1799(conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {1800abort_connection(ep, NULL, GFP_KERNEL);1801err = -EINVAL;1802goto err;1803}18041805cm_id->add_ref(cm_id);1806ep->com.cm_id = cm_id;1807ep->com.qp = qp;18081809ep->ird = conn_param->ird;1810ep->ord = conn_param->ord;18111812if (peer2peer && ep->ird == 0)1813ep->ird = 1;18141815PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);18161817/* bind QP to EP and move to RTS */1818attrs.mpa_attr = ep->mpa_attr;1819attrs.max_ird = ep->ird;1820attrs.max_ord = ep->ord;1821attrs.llp_stream_handle = ep;1822attrs.next_state = IWCH_QP_STATE_RTS;18231824/* bind QP and TID with INIT_WR */1825mask = IWCH_QP_ATTR_NEXT_STATE |1826IWCH_QP_ATTR_LLP_STREAM_HANDLE |1827IWCH_QP_ATTR_MPA_ATTR |1828IWCH_QP_ATTR_MAX_IRD |1829IWCH_QP_ATTR_MAX_ORD;18301831err = iwch_modify_qp(ep->com.qp->rhp,1832ep->com.qp, mask, &attrs, 1);1833if (err)1834goto err1;18351836/* if needed, wait for wr_ack */1837if (iwch_rqes_posted(qp)) {1838wait_event(ep->com.waitq, ep->com.rpl_done);1839err = ep->com.rpl_err;1840if (err)1841goto err1;1842}18431844err = send_mpa_reply(ep, conn_param->private_data,1845conn_param->private_data_len);1846if (err)1847goto err1;184818491850state_set(&ep->com, FPDU_MODE);1851established_upcall(ep);1852put_ep(&ep->com);1853return 0;1854err1:1855ep->com.cm_id = NULL;1856ep->com.qp = NULL;1857cm_id->rem_ref(cm_id);1858err:1859put_ep(&ep->com);1860return err;1861}18621863static int is_loopback_dst(struct iw_cm_id *cm_id)1864{1865struct net_device *dev;18661867dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);1868if (!dev)1869return 0;1870dev_put(dev);1871return 1;1872}18731874int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)1875{1876int err = 0;1877struct iwch_dev *h = to_iwch_dev(cm_id->device);1878struct iwch_ep *ep;1879struct rtable *rt;18801881if (is_loopback_dst(cm_id)) {1882err = -ENOSYS;1883goto out;1884}18851886ep = alloc_ep(sizeof(*ep), GFP_KERNEL);1887if (!ep) {1888printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);1889err = -ENOMEM;1890goto out;1891}1892init_timer(&ep->timer);1893ep->plen = conn_param->private_data_len;1894if (ep->plen)1895memcpy(ep->mpa_pkt + sizeof(struct mpa_message),1896conn_param->private_data, ep->plen);1897ep->ird = conn_param->ird;1898ep->ord = conn_param->ord;18991900if (peer2peer && ep->ord == 0)1901ep->ord = 1;19021903ep->com.tdev = h->rdev.t3cdev_p;19041905cm_id->add_ref(cm_id);1906ep->com.cm_id = cm_id;1907ep->com.qp = get_qhp(h, conn_param->qpn);1908BUG_ON(!ep->com.qp);1909PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,1910ep->com.qp, cm_id);19111912/*1913* Allocate an active TID to initiate a TCP connection.1914*/1915ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);1916if (ep->atid == -1) {1917printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);1918err = -ENOMEM;1919goto fail2;1920}19211922/* find a route */1923rt = find_route(h->rdev.t3cdev_p,1924cm_id->local_addr.sin_addr.s_addr,1925cm_id->remote_addr.sin_addr.s_addr,1926cm_id->local_addr.sin_port,1927cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);1928if (!rt) {1929printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);1930err = -EHOSTUNREACH;1931goto fail3;1932}1933ep->dst = &rt->dst;19341935/* get a l2t entry */1936ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,1937ep->dst->neighbour->dev);1938if (!ep->l2t) {1939printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);1940err = -ENOMEM;1941goto fail4;1942}19431944state_set(&ep->com, CONNECTING);1945ep->tos = IPTOS_LOWDELAY;1946ep->com.local_addr = cm_id->local_addr;1947ep->com.remote_addr = cm_id->remote_addr;19481949/* send connect request to rnic */1950err = send_connect(ep);1951if (!err)1952goto out;19531954l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);1955fail4:1956dst_release(ep->dst);1957fail3:1958cxgb3_free_atid(ep->com.tdev, ep->atid);1959fail2:1960cm_id->rem_ref(cm_id);1961put_ep(&ep->com);1962out:1963return err;1964}19651966int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)1967{1968int err = 0;1969struct iwch_dev *h = to_iwch_dev(cm_id->device);1970struct iwch_listen_ep *ep;197119721973might_sleep();19741975ep = alloc_ep(sizeof(*ep), GFP_KERNEL);1976if (!ep) {1977printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);1978err = -ENOMEM;1979goto fail1;1980}1981PDBG("%s ep %p\n", __func__, ep);1982ep->com.tdev = h->rdev.t3cdev_p;1983cm_id->add_ref(cm_id);1984ep->com.cm_id = cm_id;1985ep->backlog = backlog;1986ep->com.local_addr = cm_id->local_addr;19871988/*1989* Allocate a server TID.1990*/1991ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);1992if (ep->stid == -1) {1993printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);1994err = -ENOMEM;1995goto fail2;1996}19971998state_set(&ep->com, LISTEN);1999err = listen_start(ep);2000if (err)2001goto fail3;20022003/* wait for pass_open_rpl */2004wait_event(ep->com.waitq, ep->com.rpl_done);2005err = ep->com.rpl_err;2006if (!err) {2007cm_id->provider_data = ep;2008goto out;2009}2010fail3:2011cxgb3_free_stid(ep->com.tdev, ep->stid);2012fail2:2013cm_id->rem_ref(cm_id);2014put_ep(&ep->com);2015fail1:2016out:2017return err;2018}20192020int iwch_destroy_listen(struct iw_cm_id *cm_id)2021{2022int err;2023struct iwch_listen_ep *ep = to_listen_ep(cm_id);20242025PDBG("%s ep %p\n", __func__, ep);20262027might_sleep();2028state_set(&ep->com, DEAD);2029ep->com.rpl_done = 0;2030ep->com.rpl_err = 0;2031err = listen_stop(ep);2032if (err)2033goto done;2034wait_event(ep->com.waitq, ep->com.rpl_done);2035cxgb3_free_stid(ep->com.tdev, ep->stid);2036done:2037err = ep->com.rpl_err;2038cm_id->rem_ref(cm_id);2039put_ep(&ep->com);2040return err;2041}20422043int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)2044{2045int ret=0;2046unsigned long flags;2047int close = 0;2048int fatal = 0;2049struct t3cdev *tdev;2050struct cxio_rdev *rdev;20512052spin_lock_irqsave(&ep->com.lock, flags);20532054PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,2055states[ep->com.state], abrupt);20562057tdev = (struct t3cdev *)ep->com.tdev;2058rdev = (struct cxio_rdev *)tdev->ulp;2059if (cxio_fatal_error(rdev)) {2060fatal = 1;2061close_complete_upcall(ep);2062ep->com.state = DEAD;2063}2064switch (ep->com.state) {2065case MPA_REQ_WAIT:2066case MPA_REQ_SENT:2067case MPA_REQ_RCVD:2068case MPA_REP_SENT:2069case FPDU_MODE:2070close = 1;2071if (abrupt)2072ep->com.state = ABORTING;2073else {2074ep->com.state = CLOSING;2075start_ep_timer(ep);2076}2077set_bit(CLOSE_SENT, &ep->com.flags);2078break;2079case CLOSING:2080if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {2081close = 1;2082if (abrupt) {2083stop_ep_timer(ep);2084ep->com.state = ABORTING;2085} else2086ep->com.state = MORIBUND;2087}2088break;2089case MORIBUND:2090case ABORTING:2091case DEAD:2092PDBG("%s ignoring disconnect ep %p state %u\n",2093__func__, ep, ep->com.state);2094break;2095default:2096BUG();2097break;2098}20992100spin_unlock_irqrestore(&ep->com.lock, flags);2101if (close) {2102if (abrupt)2103ret = send_abort(ep, NULL, gfp);2104else2105ret = send_halfclose(ep, gfp);2106if (ret)2107fatal = 1;2108}2109if (fatal)2110release_ep_resources(ep);2111return ret;2112}21132114int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,2115struct l2t_entry *l2t)2116{2117struct iwch_ep *ep = ctx;21182119if (ep->dst != old)2120return 0;21212122PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,2123l2t);2124dst_hold(new);2125l2t_release(L2DATA(ep->com.tdev), ep->l2t);2126ep->l2t = l2t;2127dst_release(old);2128ep->dst = new;2129return 1;2130}21312132/*2133* All the CM events are handled on a work queue to have a safe context.2134* These are the real handlers that are called from the work queue.2135*/2136static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {2137[CPL_ACT_ESTABLISH] = act_establish,2138[CPL_ACT_OPEN_RPL] = act_open_rpl,2139[CPL_RX_DATA] = rx_data,2140[CPL_TX_DMA_ACK] = tx_ack,2141[CPL_ABORT_RPL_RSS] = abort_rpl,2142[CPL_ABORT_RPL] = abort_rpl,2143[CPL_PASS_OPEN_RPL] = pass_open_rpl,2144[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,2145[CPL_PASS_ACCEPT_REQ] = pass_accept_req,2146[CPL_PASS_ESTABLISH] = pass_establish,2147[CPL_PEER_CLOSE] = peer_close,2148[CPL_ABORT_REQ_RSS] = peer_abort,2149[CPL_CLOSE_CON_RPL] = close_con_rpl,2150[CPL_RDMA_TERMINATE] = terminate,2151[CPL_RDMA_EC_STATUS] = ec_status,2152};21532154static void process_work(struct work_struct *work)2155{2156struct sk_buff *skb = NULL;2157void *ep;2158struct t3cdev *tdev;2159int ret;21602161while ((skb = skb_dequeue(&rxq))) {2162ep = *((void **) (skb->cb));2163tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));2164ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);2165if (ret & CPL_RET_BUF_DONE)2166kfree_skb(skb);21672168/*2169* ep was referenced in sched(), and is freed here.2170*/2171put_ep((struct iwch_ep_common *)ep);2172}2173}21742175static DECLARE_WORK(skb_work, process_work);21762177static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)2178{2179struct iwch_ep_common *epc = ctx;21802181get_ep(epc);21822183/*2184* Save ctx and tdev in the skb->cb area.2185*/2186*((void **) skb->cb) = ctx;2187*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;21882189/*2190* Queue the skb and schedule the worker thread.2191*/2192skb_queue_tail(&rxq, skb);2193queue_work(workq, &skb_work);2194return 0;2195}21962197static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)2198{2199struct cpl_set_tcb_rpl *rpl = cplhdr(skb);22002201if (rpl->status != CPL_ERR_NONE) {2202printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "2203"for tid %u\n", rpl->status, GET_TID(rpl));2204}2205return CPL_RET_BUF_DONE;2206}22072208/*2209* All upcalls from the T3 Core go to sched() to schedule the2210* processing on a work queue.2211*/2212cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {2213[CPL_ACT_ESTABLISH] = sched,2214[CPL_ACT_OPEN_RPL] = sched,2215[CPL_RX_DATA] = sched,2216[CPL_TX_DMA_ACK] = sched,2217[CPL_ABORT_RPL_RSS] = sched,2218[CPL_ABORT_RPL] = sched,2219[CPL_PASS_OPEN_RPL] = sched,2220[CPL_CLOSE_LISTSRV_RPL] = sched,2221[CPL_PASS_ACCEPT_REQ] = sched,2222[CPL_PASS_ESTABLISH] = sched,2223[CPL_PEER_CLOSE] = sched,2224[CPL_CLOSE_CON_RPL] = sched,2225[CPL_ABORT_REQ_RSS] = sched,2226[CPL_RDMA_TERMINATE] = sched,2227[CPL_RDMA_EC_STATUS] = sched,2228[CPL_SET_TCB_RPL] = set_tcb_rpl,2229};22302231int __init iwch_cm_init(void)2232{2233skb_queue_head_init(&rxq);22342235workq = create_singlethread_workqueue("iw_cxgb3");2236if (!workq)2237return -ENOMEM;22382239return 0;2240}22412242void __exit iwch_cm_term(void)2243{2244flush_workqueue(workq);2245destroy_workqueue(workq);2246}224722482249