/* drivers/infiniband/hw/qib/qib_mad.c */
/*1* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.2* All rights reserved.3* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.4*5* This software is available to you under a choice of one of two6* licenses. You may choose to be licensed under the terms of the GNU7* General Public License (GPL) Version 2, available from the file8* COPYING in the main directory of this source tree, or the9* OpenIB.org BSD license below:10*11* Redistribution and use in source and binary forms, with or12* without modification, are permitted provided that the following13* conditions are met:14*15* - Redistributions of source code must retain the above16* copyright notice, this list of conditions and the following17* disclaimer.18*19* - Redistributions in binary form must reproduce the above20* copyright notice, this list of conditions and the following21* disclaimer in the documentation and/or other materials22* provided with the distribution.23*24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND27* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE31* SOFTWARE.32*/3334#include <rdma/ib_smi.h>3536#include "qib.h"37#include "qib_mad.h"3839static int reply(struct ib_smp *smp)40{41/*42* The verbs framework will handle the directed/LID route43* packet changes.44*/45smp->method = IB_MGMT_METHOD_GET_RESP;46if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)47smp->status |= IB_SMP_DIRECTION;48return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;49}5051static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)52{53struct ib_mad_send_buf *send_buf;54struct ib_mad_agent *agent;55struct ib_smp *smp;56int ret;57unsigned long flags;58unsigned long timeout;5960agent = ibp->send_agent;61if (!agent)62return;6364/* o14-3.2.1 */65if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))66return;6768/* o14-2 */69if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))70return;7172send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,73IB_MGMT_MAD_DATA, GFP_ATOMIC);74if (IS_ERR(send_buf))75return;7677smp = send_buf->mad;78smp->base_version = IB_MGMT_BASE_VERSION;79smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;80smp->class_version = 1;81smp->method = IB_MGMT_METHOD_TRAP;82ibp->tid++;83smp->tid = cpu_to_be64(ibp->tid);84smp->attr_id = IB_SMP_ATTR_NOTICE;85/* o14-1: smp->mkey = 0; */86memcpy(smp->data, data, len);8788spin_lock_irqsave(&ibp->lock, flags);89if (!ibp->sm_ah) {90if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {91struct ib_ah *ah;92struct ib_ah_attr attr;9394memset(&attr, 0, sizeof attr);95attr.dlid = ibp->sm_lid;96attr.port_num = ppd_from_ibp(ibp)->port;97ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);98if (IS_ERR(ah))99ret = -EINVAL;100else {101send_buf->ah = ah;102ibp->sm_ah = to_iah(ah);103ret = 0;104}105} else106ret = 
-EINVAL;107} else {108send_buf->ah = &ibp->sm_ah->ibah;109ret = 0;110}111spin_unlock_irqrestore(&ibp->lock, flags);112113if (!ret)114ret = ib_post_send_mad(send_buf, NULL);115if (!ret) {116/* 4.096 usec. */117timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;118ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);119} else {120ib_free_send_mad(send_buf);121ibp->trap_timeout = 0;122}123}124125/*126* Send a bad [PQ]_Key trap (ch. 14.3.8).127*/128void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,129u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)130{131struct ib_mad_notice_attr data;132133if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)134ibp->pkey_violations++;135else136ibp->qkey_violations++;137ibp->n_pkt_drops++;138139/* Send violation trap */140data.generic_type = IB_NOTICE_TYPE_SECURITY;141data.prod_type_msb = 0;142data.prod_type_lsb = IB_NOTICE_PROD_CA;143data.trap_num = trap_num;144data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);145data.toggle_count = 0;146memset(&data.details, 0, sizeof data.details);147data.details.ntc_257_258.lid1 = lid1;148data.details.ntc_257_258.lid2 = lid2;149data.details.ntc_257_258.key = cpu_to_be32(key);150data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);151data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);152153qib_send_trap(ibp, &data, sizeof data);154}155156/*157* Send a bad M_Key trap (ch. 
14.3.9).158*/159static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)160{161struct ib_mad_notice_attr data;162163/* Send violation trap */164data.generic_type = IB_NOTICE_TYPE_SECURITY;165data.prod_type_msb = 0;166data.prod_type_lsb = IB_NOTICE_PROD_CA;167data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;168data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);169data.toggle_count = 0;170memset(&data.details, 0, sizeof data.details);171data.details.ntc_256.lid = data.issuer_lid;172data.details.ntc_256.method = smp->method;173data.details.ntc_256.attr_id = smp->attr_id;174data.details.ntc_256.attr_mod = smp->attr_mod;175data.details.ntc_256.mkey = smp->mkey;176if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {177u8 hop_cnt;178179data.details.ntc_256.dr_slid = smp->dr_slid;180data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;181hop_cnt = smp->hop_cnt;182if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {183data.details.ntc_256.dr_trunc_hop |=184IB_NOTICE_TRAP_DR_TRUNC;185hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);186}187data.details.ntc_256.dr_trunc_hop |= hop_cnt;188memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,189hop_cnt);190}191192qib_send_trap(ibp, &data, sizeof data);193}194195/*196* Send a Port Capability Mask Changed trap (ch. 14.3.11).197*/198void qib_cap_mask_chg(struct qib_ibport *ibp)199{200struct ib_mad_notice_attr data;201202data.generic_type = IB_NOTICE_TYPE_INFO;203data.prod_type_msb = 0;204data.prod_type_lsb = IB_NOTICE_PROD_CA;205data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;206data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);207data.toggle_count = 0;208memset(&data.details, 0, sizeof data.details);209data.details.ntc_144.lid = data.issuer_lid;210data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);211212qib_send_trap(ibp, &data, sizeof data);213}214215/*216* Send a System Image GUID Changed trap (ch. 
14.3.12).217*/218void qib_sys_guid_chg(struct qib_ibport *ibp)219{220struct ib_mad_notice_attr data;221222data.generic_type = IB_NOTICE_TYPE_INFO;223data.prod_type_msb = 0;224data.prod_type_lsb = IB_NOTICE_PROD_CA;225data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;226data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);227data.toggle_count = 0;228memset(&data.details, 0, sizeof data.details);229data.details.ntc_145.lid = data.issuer_lid;230data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;231232qib_send_trap(ibp, &data, sizeof data);233}234235/*236* Send a Node Description Changed trap (ch. 14.3.13).237*/238void qib_node_desc_chg(struct qib_ibport *ibp)239{240struct ib_mad_notice_attr data;241242data.generic_type = IB_NOTICE_TYPE_INFO;243data.prod_type_msb = 0;244data.prod_type_lsb = IB_NOTICE_PROD_CA;245data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;246data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);247data.toggle_count = 0;248memset(&data.details, 0, sizeof data.details);249data.details.ntc_144.lid = data.issuer_lid;250data.details.ntc_144.local_changes = 1;251data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;252253qib_send_trap(ibp, &data, sizeof data);254}255256static int subn_get_nodedescription(struct ib_smp *smp,257struct ib_device *ibdev)258{259if (smp->attr_mod)260smp->status |= IB_SMP_INVALID_FIELD;261262memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));263264return reply(smp);265}266267static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,268u8 port)269{270struct ib_node_info *nip = (struct ib_node_info *)&smp->data;271struct qib_devdata *dd = dd_from_ibdev(ibdev);272u32 vendor, majrev, minrev;273unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */274275/* GUID 0 is illegal */276if (smp->attr_mod || pidx >= dd->num_pports ||277dd->pport[pidx].guid == 0)278smp->status |= IB_SMP_INVALID_FIELD;279else280nip->port_guid = dd->pport[pidx].guid;281282nip->base_version = 1;283nip->class_version = 
1;284nip->node_type = 1; /* channel adapter */285nip->num_ports = ibdev->phys_port_cnt;286/* This is already in network order */287nip->sys_guid = ib_qib_sys_image_guid;288nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */289nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));290nip->device_id = cpu_to_be16(dd->deviceid);291majrev = dd->majrev;292minrev = dd->minrev;293nip->revision = cpu_to_be32((majrev << 16) | minrev);294nip->local_port_num = port;295vendor = dd->vendorid;296nip->vendor_id[0] = QIB_SRC_OUI_1;297nip->vendor_id[1] = QIB_SRC_OUI_2;298nip->vendor_id[2] = QIB_SRC_OUI_3;299300return reply(smp);301}302303static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,304u8 port)305{306struct qib_devdata *dd = dd_from_ibdev(ibdev);307u32 startgx = 8 * be32_to_cpu(smp->attr_mod);308__be64 *p = (__be64 *) smp->data;309unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */310311/* 32 blocks of 8 64-bit GUIDs per block */312313memset(smp->data, 0, sizeof(smp->data));314315if (startgx == 0 && pidx < dd->num_pports) {316struct qib_pportdata *ppd = dd->pport + pidx;317struct qib_ibport *ibp = &ppd->ibport_data;318__be64 g = ppd->guid;319unsigned i;320321/* GUID 0 is illegal */322if (g == 0)323smp->status |= IB_SMP_INVALID_FIELD;324else {325/* The first is a copy of the read-only HW GUID. 
*/326p[0] = g;327for (i = 1; i < QIB_GUIDS_PER_PORT; i++)328p[i] = ibp->guids[i - 1];329}330} else331smp->status |= IB_SMP_INVALID_FIELD;332333return reply(smp);334}335336static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)337{338(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);339}340341static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)342{343(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);344}345346static int get_overrunthreshold(struct qib_pportdata *ppd)347{348return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);349}350351/**352* set_overrunthreshold - set the overrun threshold353* @ppd: the physical port data354* @n: the new threshold355*356* Note that this will only take effect when the link state changes.357*/358static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)359{360(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,361(u32)n);362return 0;363}364365static int get_phyerrthreshold(struct qib_pportdata *ppd)366{367return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);368}369370/**371* set_phyerrthreshold - set the physical error threshold372* @ppd: the physical port data373* @n: the new threshold374*375* Note that this will only take effect when the link state changes.376*/377static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)378{379(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,380(u32)n);381return 0;382}383384/**385* get_linkdowndefaultstate - get the default linkdown state386* @ppd: the physical port data387*388* Returns zero if the default is POLL, 1 if the default is SLEEP.389*/390static int get_linkdowndefaultstate(struct qib_pportdata *ppd)391{392return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==393IB_LINKINITCMD_SLEEP;394}395396static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)397{398int ret = 0;399400/* Is the mkey in the process of expiring? 
*/401if (ibp->mkey_lease_timeout &&402time_after_eq(jiffies, ibp->mkey_lease_timeout)) {403/* Clear timeout and mkey protection field. */404ibp->mkey_lease_timeout = 0;405ibp->mkeyprot = 0;406}407408/* M_Key checking depends on Portinfo:M_Key_protect_bits */409if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&410ibp->mkey != smp->mkey &&411(smp->method == IB_MGMT_METHOD_SET ||412smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||413(smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {414if (ibp->mkey_violations != 0xFFFF)415++ibp->mkey_violations;416if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)417ibp->mkey_lease_timeout = jiffies +418ibp->mkey_lease_period * HZ;419/* Generate a trap notice. */420qib_bad_mkey(ibp, smp);421ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;422} else if (ibp->mkey_lease_timeout)423ibp->mkey_lease_timeout = 0;424425return ret;426}427428static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,429u8 port)430{431struct qib_devdata *dd;432struct qib_pportdata *ppd;433struct qib_ibport *ibp;434struct ib_port_info *pip = (struct ib_port_info *)smp->data;435u16 lid;436u8 mtu;437int ret;438u32 state;439u32 port_num = be32_to_cpu(smp->attr_mod);440441if (port_num == 0)442port_num = port;443else {444if (port_num > ibdev->phys_port_cnt) {445smp->status |= IB_SMP_INVALID_FIELD;446ret = reply(smp);447goto bail;448}449if (port_num != port) {450ibp = to_iport(ibdev, port_num);451ret = check_mkey(ibp, smp, 0);452if (ret)453goto bail;454}455}456457dd = dd_from_ibdev(ibdev);458/* IB numbers ports from 1, hdw from 0 */459ppd = dd->pport + (port_num - 1);460ibp = &ppd->ibport_data;461462/* Clear all fields. Only set the non-zero fields. */463memset(smp->data, 0, sizeof(smp->data));464465/* Only return the mkey if the protection field allows it. 
*/466if (!(smp->method == IB_MGMT_METHOD_GET &&467ibp->mkey != smp->mkey &&468ibp->mkeyprot == 1))469pip->mkey = ibp->mkey;470pip->gid_prefix = ibp->gid_prefix;471lid = ppd->lid;472pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;473pip->sm_lid = cpu_to_be16(ibp->sm_lid);474pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);475/* pip->diag_code; */476pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);477pip->local_port_num = port;478pip->link_width_enabled = ppd->link_width_enabled;479pip->link_width_supported = ppd->link_width_supported;480pip->link_width_active = ppd->link_width_active;481state = dd->f_iblink_state(ppd->lastibcstat);482pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;483484pip->portphysstate_linkdown =485(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |486(get_linkdowndefaultstate(ppd) ? 1 : 2);487pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;488pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |489ppd->link_speed_enabled;490switch (ppd->ibmtu) {491default: /* something is wrong; fall through */492case 4096:493mtu = IB_MTU_4096;494break;495case 2048:496mtu = IB_MTU_2048;497break;498case 1024:499mtu = IB_MTU_1024;500break;501case 512:502mtu = IB_MTU_512;503break;504case 256:505mtu = IB_MTU_256;506break;507}508pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;509pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */510pip->vl_high_limit = ibp->vl_high_limit;511pip->vl_arb_high_cap =512dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);513pip->vl_arb_low_cap =514dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);515/* InitTypeReply = 0 */516pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;517/* HCAs ignore VLStallCount and HOQLife */518/* pip->vlstallcnt_hoqlife; */519pip->operationalvl_pei_peo_fpi_fpo =520dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;521pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);522/* P_KeyViolations are counted by hardware. 
*/523pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);524pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);525/* Only the hardware GUID is supported for now */526pip->guid_cap = QIB_GUIDS_PER_PORT;527pip->clientrereg_resv_subnetto = ibp->subnet_timeout;528/* 32.768 usec. response time (guessing) */529pip->resv_resptimevalue = 3;530pip->localphyerrors_overrunerrors =531(get_phyerrthreshold(ppd) << 4) |532get_overrunthreshold(ppd);533/* pip->max_credit_hint; */534if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {535u32 v;536537v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);538pip->link_roundtrip_latency[0] = v >> 16;539pip->link_roundtrip_latency[1] = v >> 8;540pip->link_roundtrip_latency[2] = v;541}542543ret = reply(smp);544545bail:546return ret;547}548549/**550* get_pkeys - return the PKEY table551* @dd: the qlogic_ib device552* @port: the IB port number553* @pkeys: the pkey table is placed here554*/555static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)556{557struct qib_pportdata *ppd = dd->pport + port - 1;558/*559* always a kernel context, no locking needed.560* If we get here with ppd setup, no need to check561* that pd is valid.562*/563struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];564565memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));566567return 0;568}569570static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,571u8 port)572{573u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);574u16 *p = (u16 *) smp->data;575__be16 *q = (__be16 *) smp->data;576577/* 64 blocks of 32 16-bit P_Key entries */578579memset(smp->data, 0, sizeof(smp->data));580if (startpx == 0) {581struct qib_devdata *dd = dd_from_ibdev(ibdev);582unsigned i, n = qib_get_npkeys(dd);583584get_pkeys(dd, port, p);585586for (i = 0; i < n; i++)587q[i] = cpu_to_be16(p[i]);588} else589smp->status |= IB_SMP_INVALID_FIELD;590591return reply(smp);592}593594static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,595u8 
port)596{597struct qib_devdata *dd = dd_from_ibdev(ibdev);598u32 startgx = 8 * be32_to_cpu(smp->attr_mod);599__be64 *p = (__be64 *) smp->data;600unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */601602/* 32 blocks of 8 64-bit GUIDs per block */603604if (startgx == 0 && pidx < dd->num_pports) {605struct qib_pportdata *ppd = dd->pport + pidx;606struct qib_ibport *ibp = &ppd->ibport_data;607unsigned i;608609/* The first entry is read-only. */610for (i = 1; i < QIB_GUIDS_PER_PORT; i++)611ibp->guids[i - 1] = p[i];612} else613smp->status |= IB_SMP_INVALID_FIELD;614615/* The only GUID we support is the first read-only entry. */616return subn_get_guidinfo(smp, ibdev, port);617}618619/**620* subn_set_portinfo - set port information621* @smp: the incoming SM packet622* @ibdev: the infiniband device623* @port: the port on the device624*625* Set Portinfo (see ch. 14.2.5.6).626*/627static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,628u8 port)629{630struct ib_port_info *pip = (struct ib_port_info *)smp->data;631struct ib_event event;632struct qib_devdata *dd;633struct qib_pportdata *ppd;634struct qib_ibport *ibp;635char clientrereg = 0;636unsigned long flags;637u16 lid, smlid;638u8 lwe;639u8 lse;640u8 state;641u8 vls;642u8 msl;643u16 lstate;644int ret, ore, mtu;645u32 port_num = be32_to_cpu(smp->attr_mod);646647if (port_num == 0)648port_num = port;649else {650if (port_num > ibdev->phys_port_cnt)651goto err;652/* Port attributes can only be set on the receiving port */653if (port_num != port)654goto get_only;655}656657dd = dd_from_ibdev(ibdev);658/* IB numbers ports from 1, hdw from 0 */659ppd = dd->pport + (port_num - 1);660ibp = &ppd->ibport_data;661event.device = ibdev;662event.element.port_num = port;663664ibp->mkey = pip->mkey;665ibp->gid_prefix = pip->gid_prefix;666ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);667668lid = be16_to_cpu(pip->lid);669/* Must be a valid unicast LID address. 
*/670if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)671smp->status |= IB_SMP_INVALID_FIELD;672else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {673if (ppd->lid != lid)674qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);675if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))676qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);677qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);678event.event = IB_EVENT_LID_CHANGE;679ib_dispatch_event(&event);680}681682smlid = be16_to_cpu(pip->sm_lid);683msl = pip->neighbormtu_mastersmsl & 0xF;684/* Must be a valid unicast LID address. */685if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)686smp->status |= IB_SMP_INVALID_FIELD;687else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {688spin_lock_irqsave(&ibp->lock, flags);689if (ibp->sm_ah) {690if (smlid != ibp->sm_lid)691ibp->sm_ah->attr.dlid = smlid;692if (msl != ibp->sm_sl)693ibp->sm_ah->attr.sl = msl;694}695spin_unlock_irqrestore(&ibp->lock, flags);696if (smlid != ibp->sm_lid)697ibp->sm_lid = smlid;698if (msl != ibp->sm_sl)699ibp->sm_sl = msl;700event.event = IB_EVENT_SM_CHANGE;701ib_dispatch_event(&event);702}703704/* Allow 1x or 4x to be set (see 14.2.6.6). */705lwe = pip->link_width_enabled;706if (lwe) {707if (lwe == 0xFF)708set_link_width_enabled(ppd, ppd->link_width_supported);709else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))710smp->status |= IB_SMP_INVALID_FIELD;711else if (lwe != ppd->link_width_enabled)712set_link_width_enabled(ppd, lwe);713}714715lse = pip->linkspeedactive_enabled & 0xF;716if (lse) {717/*718* The IB 1.2 spec. only allows link speed values719* 1, 3, 5, 7, 15. 1.2.1 extended to allow specific720* speeds.721*/722if (lse == 15)723set_link_speed_enabled(ppd,724ppd->link_speed_supported);725else if (lse >= 8 || (lse & ~ppd->link_speed_supported))726smp->status |= IB_SMP_INVALID_FIELD;727else if (lse != ppd->link_speed_enabled)728set_link_speed_enabled(ppd, lse);729}730731/* Set link down default state. 
*/732switch (pip->portphysstate_linkdown & 0xF) {733case 0: /* NOP */734break;735case 1: /* SLEEP */736(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,737IB_LINKINITCMD_SLEEP);738break;739case 2: /* POLL */740(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,741IB_LINKINITCMD_POLL);742break;743default:744smp->status |= IB_SMP_INVALID_FIELD;745}746747ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;748ibp->vl_high_limit = pip->vl_high_limit;749(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,750ibp->vl_high_limit);751752mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);753if (mtu == -1)754smp->status |= IB_SMP_INVALID_FIELD;755else756qib_set_mtu(ppd, mtu);757758/* Set operational VLs */759vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;760if (vls) {761if (vls > ppd->vls_supported)762smp->status |= IB_SMP_INVALID_FIELD;763else764(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);765}766767if (pip->mkey_violations == 0)768ibp->mkey_violations = 0;769770if (pip->pkey_violations == 0)771ibp->pkey_violations = 0;772773if (pip->qkey_violations == 0)774ibp->qkey_violations = 0;775776ore = pip->localphyerrors_overrunerrors;777if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))778smp->status |= IB_SMP_INVALID_FIELD;779780if (set_overrunthreshold(ppd, (ore & 0xF)))781smp->status |= IB_SMP_INVALID_FIELD;782783ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;784785if (pip->clientrereg_resv_subnetto & 0x80) {786clientrereg = 1;787event.event = IB_EVENT_CLIENT_REREGISTER;788ib_dispatch_event(&event);789}790791/*792* Do the port state change now that the other link parameters793* have been set.794* Changing the port physical state only makes sense if the link795* is down or is being set to down.796*/797state = pip->linkspeed_portstate & 0xF;798lstate = (pip->portphysstate_linkdown >> 4) & 0xF;799if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))800smp->status |= IB_SMP_INVALID_FIELD;801802/*803* Only state changes of DOWN, ARM, 
and ACTIVE are valid804* and must be in the correct state to take effect (see 7.2.6).805*/806switch (state) {807case IB_PORT_NOP:808if (lstate == 0)809break;810/* FALLTHROUGH */811case IB_PORT_DOWN:812if (lstate == 0)813lstate = QIB_IB_LINKDOWN_ONLY;814else if (lstate == 1)815lstate = QIB_IB_LINKDOWN_SLEEP;816else if (lstate == 2)817lstate = QIB_IB_LINKDOWN;818else if (lstate == 3)819lstate = QIB_IB_LINKDOWN_DISABLE;820else {821smp->status |= IB_SMP_INVALID_FIELD;822break;823}824spin_lock_irqsave(&ppd->lflags_lock, flags);825ppd->lflags &= ~QIBL_LINKV;826spin_unlock_irqrestore(&ppd->lflags_lock, flags);827qib_set_linkstate(ppd, lstate);828/*829* Don't send a reply if the response would be sent830* through the disabled port.831*/832if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {833ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;834goto done;835}836qib_wait_linkstate(ppd, QIBL_LINKV, 10);837break;838case IB_PORT_ARMED:839qib_set_linkstate(ppd, QIB_IB_LINKARM);840break;841case IB_PORT_ACTIVE:842qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);843break;844default:845smp->status |= IB_SMP_INVALID_FIELD;846}847848ret = subn_get_portinfo(smp, ibdev, port);849850if (clientrereg)851pip->clientrereg_resv_subnetto |= 0x80;852853goto get_only;854855err:856smp->status |= IB_SMP_INVALID_FIELD;857get_only:858ret = subn_get_portinfo(smp, ibdev, port);859done:860return ret;861}862863/**864* rm_pkey - decrecment the reference count for the given PKEY865* @dd: the qlogic_ib device866* @key: the PKEY index867*868* Return true if this was the last reference and the hardware table entry869* needs to be changed.870*/871static int rm_pkey(struct qib_pportdata *ppd, u16 key)872{873int i;874int ret;875876for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {877if (ppd->pkeys[i] != key)878continue;879if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {880ppd->pkeys[i] = 0;881ret = 1;882goto bail;883}884break;885}886887ret = 0;888889bail:890return ret;891}892893/**894* add_pkey - add the given PKEY 
to the hardware table895* @dd: the qlogic_ib device896* @key: the PKEY897*898* Return an error code if unable to add the entry, zero if no change,899* or 1 if the hardware PKEY register needs to be updated.900*/901static int add_pkey(struct qib_pportdata *ppd, u16 key)902{903int i;904u16 lkey = key & 0x7FFF;905int any = 0;906int ret;907908if (lkey == 0x7FFF) {909ret = 0;910goto bail;911}912913/* Look for an empty slot or a matching PKEY. */914for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {915if (!ppd->pkeys[i]) {916any++;917continue;918}919/* If it matches exactly, try to increment the ref count */920if (ppd->pkeys[i] == key) {921if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {922ret = 0;923goto bail;924}925/* Lost the race. Look for an empty slot below. */926atomic_dec(&ppd->pkeyrefs[i]);927any++;928}929/*930* It makes no sense to have both the limited and unlimited931* PKEY set at the same time since the unlimited one will932* disable the limited one.933*/934if ((ppd->pkeys[i] & 0x7FFF) == lkey) {935ret = -EEXIST;936goto bail;937}938}939if (!any) {940ret = -EBUSY;941goto bail;942}943for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {944if (!ppd->pkeys[i] &&945atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {946/* for qibstats, etc. 
*/947ppd->pkeys[i] = key;948ret = 1;949goto bail;950}951}952ret = -EBUSY;953954bail:955return ret;956}957958/**959* set_pkeys - set the PKEY table for ctxt 0960* @dd: the qlogic_ib device961* @port: the IB port number962* @pkeys: the PKEY table963*/964static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)965{966struct qib_pportdata *ppd;967struct qib_ctxtdata *rcd;968int i;969int changed = 0;970971/*972* IB port one/two always maps to context zero/one,973* always a kernel context, no locking needed974* If we get here with ppd setup, no need to check975* that rcd is valid.976*/977ppd = dd->pport + (port - 1);978rcd = dd->rcd[ppd->hw_pidx];979980for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {981u16 key = pkeys[i];982u16 okey = rcd->pkeys[i];983984if (key == okey)985continue;986/*987* The value of this PKEY table entry is changing.988* Remove the old entry in the hardware's array of PKEYs.989*/990if (okey & 0x7FFF)991changed |= rm_pkey(ppd, okey);992if (key & 0x7FFF) {993int ret = add_pkey(ppd, key);994995if (ret < 0)996key = 0;997else998changed |= ret;999}1000rcd->pkeys[i] = key;1001}1002if (changed) {1003struct ib_event event;10041005(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);10061007event.event = IB_EVENT_PKEY_CHANGE;1008event.device = &dd->verbs_dev.ibdev;1009event.element.port_num = 1;1010ib_dispatch_event(&event);1011}1012return 0;1013}10141015static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,1016u8 port)1017{1018u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);1019__be16 *p = (__be16 *) smp->data;1020u16 *q = (u16 *) smp->data;1021struct qib_devdata *dd = dd_from_ibdev(ibdev);1022unsigned i, n = qib_get_npkeys(dd);10231024for (i = 0; i < n; i++)1025q[i] = be16_to_cpu(p[i]);10261027if (startpx != 0 || set_pkeys(dd, port, q) != 0)1028smp->status |= IB_SMP_INVALID_FIELD;10291030return subn_get_pkeytable(smp, ibdev, port);1031}10321033static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,1034u8 
port)1035{1036struct qib_ibport *ibp = to_iport(ibdev, port);1037u8 *p = (u8 *) smp->data;1038unsigned i;10391040memset(smp->data, 0, sizeof(smp->data));10411042if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))1043smp->status |= IB_SMP_UNSUP_METHOD;1044else1045for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)1046*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];10471048return reply(smp);1049}10501051static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,1052u8 port)1053{1054struct qib_ibport *ibp = to_iport(ibdev, port);1055u8 *p = (u8 *) smp->data;1056unsigned i;10571058if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {1059smp->status |= IB_SMP_UNSUP_METHOD;1060return reply(smp);1061}10621063for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {1064ibp->sl_to_vl[i] = *p >> 4;1065ibp->sl_to_vl[i + 1] = *p & 0xF;1066}1067qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),1068_QIB_EVENT_SL2VL_CHANGE_BIT);10691070return subn_get_sl_to_vl(smp, ibdev, port);1071}10721073static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,1074u8 port)1075{1076unsigned which = be32_to_cpu(smp->attr_mod) >> 16;1077struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));10781079memset(smp->data, 0, sizeof(smp->data));10801081if (ppd->vls_supported == IB_VL_VL0)1082smp->status |= IB_SMP_UNSUP_METHOD;1083else if (which == IB_VLARB_LOWPRI_0_31)1084(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,1085smp->data);1086else if (which == IB_VLARB_HIGHPRI_0_31)1087(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,1088smp->data);1089else1090smp->status |= IB_SMP_INVALID_FIELD;10911092return reply(smp);1093}10941095static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,1096u8 port)1097{1098unsigned which = be32_to_cpu(smp->attr_mod) >> 16;1099struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));11001101if (ppd->vls_supported == IB_VL_VL0)1102smp->status |= IB_SMP_UNSUP_METHOD;1103else if (which == 
IB_VLARB_LOWPRI_0_31)1104(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,1105smp->data);1106else if (which == IB_VLARB_HIGHPRI_0_31)1107(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,1108smp->data);1109else1110smp->status |= IB_SMP_INVALID_FIELD;11111112return subn_get_vl_arb(smp, ibdev, port);1113}11141115static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,1116u8 port)1117{1118/*1119* For now, we only send the trap once so no need to process this.1120* o13-6, o13-7,1121* o14-3.a4 The SMA shall not send any message in response to a valid1122* SubnTrapRepress() message.1123*/1124return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;1125}11261127static int pma_get_classportinfo(struct ib_perf *pmp,1128struct ib_device *ibdev)1129{1130struct ib_pma_classportinfo *p =1131(struct ib_pma_classportinfo *)pmp->data;1132struct qib_devdata *dd = dd_from_ibdev(ibdev);11331134memset(pmp->data, 0, sizeof(pmp->data));11351136if (pmp->attr_mod != 0)1137pmp->status |= IB_SMP_INVALID_FIELD;11381139/* Note that AllPortSelect is not valid */1140p->base_version = 1;1141p->class_version = 1;1142p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;1143/*1144* Set the most significant bit of CM2 to indicate support for1145* congestion statistics1146*/1147p->reserved[0] = dd->psxmitwait_supported << 7;1148/*1149* Expected response time is 4.096 usec. 
* 2^18 == 1.073741824 sec.1150*/1151p->resp_time_value = 18;11521153return reply((struct ib_smp *) pmp);1154}11551156static int pma_get_portsamplescontrol(struct ib_perf *pmp,1157struct ib_device *ibdev, u8 port)1158{1159struct ib_pma_portsamplescontrol *p =1160(struct ib_pma_portsamplescontrol *)pmp->data;1161struct qib_ibdev *dev = to_idev(ibdev);1162struct qib_devdata *dd = dd_from_dev(dev);1163struct qib_ibport *ibp = to_iport(ibdev, port);1164struct qib_pportdata *ppd = ppd_from_ibp(ibp);1165unsigned long flags;1166u8 port_select = p->port_select;11671168memset(pmp->data, 0, sizeof(pmp->data));11691170p->port_select = port_select;1171if (pmp->attr_mod != 0 || port_select != port) {1172pmp->status |= IB_SMP_INVALID_FIELD;1173goto bail;1174}1175spin_lock_irqsave(&ibp->lock, flags);1176p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);1177p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);1178p->counter_width = 4; /* 32 bit counters */1179p->counter_mask0_9 = COUNTER_MASK0_9;1180p->sample_start = cpu_to_be32(ibp->pma_sample_start);1181p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);1182p->tag = cpu_to_be16(ibp->pma_tag);1183p->counter_select[0] = ibp->pma_counter_select[0];1184p->counter_select[1] = ibp->pma_counter_select[1];1185p->counter_select[2] = ibp->pma_counter_select[2];1186p->counter_select[3] = ibp->pma_counter_select[3];1187p->counter_select[4] = ibp->pma_counter_select[4];1188spin_unlock_irqrestore(&ibp->lock, flags);11891190bail:1191return reply((struct ib_smp *) pmp);1192}11931194static int pma_set_portsamplescontrol(struct ib_perf *pmp,1195struct ib_device *ibdev, u8 port)1196{1197struct ib_pma_portsamplescontrol *p =1198(struct ib_pma_portsamplescontrol *)pmp->data;1199struct qib_ibdev *dev = to_idev(ibdev);1200struct qib_devdata *dd = dd_from_dev(dev);1201struct qib_ibport *ibp = to_iport(ibdev, port);1202struct qib_pportdata *ppd = ppd_from_ibp(ibp);1203unsigned long flags;1204u8 status, xmit_flags;1205int ret;12061207if 
(pmp->attr_mod != 0 || p->port_select != port) {1208pmp->status |= IB_SMP_INVALID_FIELD;1209ret = reply((struct ib_smp *) pmp);1210goto bail;1211}12121213spin_lock_irqsave(&ibp->lock, flags);12141215/* Port Sampling code owns the PS* HW counters */1216xmit_flags = ppd->cong_stats.flags;1217ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;1218status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);1219if (status == IB_PMA_SAMPLE_STATUS_DONE ||1220(status == IB_PMA_SAMPLE_STATUS_RUNNING &&1221xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {1222ibp->pma_sample_start = be32_to_cpu(p->sample_start);1223ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);1224ibp->pma_tag = be16_to_cpu(p->tag);1225ibp->pma_counter_select[0] = p->counter_select[0];1226ibp->pma_counter_select[1] = p->counter_select[1];1227ibp->pma_counter_select[2] = p->counter_select[2];1228ibp->pma_counter_select[3] = p->counter_select[3];1229ibp->pma_counter_select[4] = p->counter_select[4];1230dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,1231ibp->pma_sample_start);1232}1233spin_unlock_irqrestore(&ibp->lock, flags);12341235ret = pma_get_portsamplescontrol(pmp, ibdev, port);12361237bail:1238return ret;1239}12401241static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,1242__be16 sel)1243{1244u64 ret;12451246switch (sel) {1247case IB_PMA_PORT_XMIT_DATA:1248ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);1249break;1250case IB_PMA_PORT_RCV_DATA:1251ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);1252break;1253case IB_PMA_PORT_XMIT_PKTS:1254ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);1255break;1256case IB_PMA_PORT_RCV_PKTS:1257ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);1258break;1259case IB_PMA_PORT_XMIT_WAIT:1260ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);1261break;1262default:1263ret = 0;1264}12651266return ret;1267}12681269/* This function assumes that the xmit_wait lock is already held */1270static u64 
xmit_wait_get_value_delta(struct qib_pportdata *ppd)1271{1272u32 delta;12731274delta = get_counter(&ppd->ibport_data, ppd,1275IB_PMA_PORT_XMIT_WAIT);1276return ppd->cong_stats.counter + delta;1277}12781279static void cache_hw_sample_counters(struct qib_pportdata *ppd)1280{1281struct qib_ibport *ibp = &ppd->ibport_data;12821283ppd->cong_stats.counter_cache.psxmitdata =1284get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);1285ppd->cong_stats.counter_cache.psrcvdata =1286get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);1287ppd->cong_stats.counter_cache.psxmitpkts =1288get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);1289ppd->cong_stats.counter_cache.psrcvpkts =1290get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);1291ppd->cong_stats.counter_cache.psxmitwait =1292get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);1293}12941295static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,1296__be16 sel)1297{1298u64 ret;12991300switch (sel) {1301case IB_PMA_PORT_XMIT_DATA:1302ret = ppd->cong_stats.counter_cache.psxmitdata;1303break;1304case IB_PMA_PORT_RCV_DATA:1305ret = ppd->cong_stats.counter_cache.psrcvdata;1306break;1307case IB_PMA_PORT_XMIT_PKTS:1308ret = ppd->cong_stats.counter_cache.psxmitpkts;1309break;1310case IB_PMA_PORT_RCV_PKTS:1311ret = ppd->cong_stats.counter_cache.psrcvpkts;1312break;1313case IB_PMA_PORT_XMIT_WAIT:1314ret = ppd->cong_stats.counter_cache.psxmitwait;1315break;1316default:1317ret = 0;1318}13191320return ret;1321}13221323static int pma_get_portsamplesresult(struct ib_perf *pmp,1324struct ib_device *ibdev, u8 port)1325{1326struct ib_pma_portsamplesresult *p =1327(struct ib_pma_portsamplesresult *)pmp->data;1328struct qib_ibdev *dev = to_idev(ibdev);1329struct qib_devdata *dd = dd_from_dev(dev);1330struct qib_ibport *ibp = to_iport(ibdev, port);1331struct qib_pportdata *ppd = ppd_from_ibp(ibp);1332unsigned long flags;1333u8 status;1334int i;13351336memset(pmp->data, 0, sizeof(pmp->data));1337spin_lock_irqsave(&ibp->lock, flags);1338p->tag = 
cpu_to_be16(ibp->pma_tag);1339if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)1340p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;1341else {1342status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);1343p->sample_status = cpu_to_be16(status);1344if (status == IB_PMA_SAMPLE_STATUS_DONE) {1345cache_hw_sample_counters(ppd);1346ppd->cong_stats.counter =1347xmit_wait_get_value_delta(ppd);1348dd->f_set_cntr_sample(ppd,1349QIB_CONG_TIMER_PSINTERVAL, 0);1350ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;1351}1352}1353for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)1354p->counter[i] = cpu_to_be32(1355get_cache_hw_sample_counters(1356ppd, ibp->pma_counter_select[i]));1357spin_unlock_irqrestore(&ibp->lock, flags);13581359return reply((struct ib_smp *) pmp);1360}13611362static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,1363struct ib_device *ibdev, u8 port)1364{1365struct ib_pma_portsamplesresult_ext *p =1366(struct ib_pma_portsamplesresult_ext *)pmp->data;1367struct qib_ibdev *dev = to_idev(ibdev);1368struct qib_devdata *dd = dd_from_dev(dev);1369struct qib_ibport *ibp = to_iport(ibdev, port);1370struct qib_pportdata *ppd = ppd_from_ibp(ibp);1371unsigned long flags;1372u8 status;1373int i;13741375/* Port Sampling code owns the PS* HW counters */1376memset(pmp->data, 0, sizeof(pmp->data));1377spin_lock_irqsave(&ibp->lock, flags);1378p->tag = cpu_to_be16(ibp->pma_tag);1379if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)1380p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;1381else {1382status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);1383p->sample_status = cpu_to_be16(status);1384/* 64 bits */1385p->extended_width = cpu_to_be32(0x80000000);1386if (status == IB_PMA_SAMPLE_STATUS_DONE) {1387cache_hw_sample_counters(ppd);1388ppd->cong_stats.counter =1389xmit_wait_get_value_delta(ppd);1390dd->f_set_cntr_sample(ppd,1391QIB_CONG_TIMER_PSINTERVAL, 0);1392ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;1393}1394}1395for (i = 0; i < 
ARRAY_SIZE(ibp->pma_counter_select); i++)1396p->counter[i] = cpu_to_be64(1397get_cache_hw_sample_counters(1398ppd, ibp->pma_counter_select[i]));1399spin_unlock_irqrestore(&ibp->lock, flags);14001401return reply((struct ib_smp *) pmp);1402}14031404static int pma_get_portcounters(struct ib_perf *pmp,1405struct ib_device *ibdev, u8 port)1406{1407struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)1408pmp->data;1409struct qib_ibport *ibp = to_iport(ibdev, port);1410struct qib_pportdata *ppd = ppd_from_ibp(ibp);1411struct qib_verbs_counters cntrs;1412u8 port_select = p->port_select;14131414qib_get_counters(ppd, &cntrs);14151416/* Adjust counters for any resets done. */1417cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;1418cntrs.link_error_recovery_counter -=1419ibp->z_link_error_recovery_counter;1420cntrs.link_downed_counter -= ibp->z_link_downed_counter;1421cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;1422cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;1423cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;1424cntrs.port_xmit_data -= ibp->z_port_xmit_data;1425cntrs.port_rcv_data -= ibp->z_port_rcv_data;1426cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;1427cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;1428cntrs.local_link_integrity_errors -=1429ibp->z_local_link_integrity_errors;1430cntrs.excessive_buffer_overrun_errors -=1431ibp->z_excessive_buffer_overrun_errors;1432cntrs.vl15_dropped -= ibp->z_vl15_dropped;1433cntrs.vl15_dropped += ibp->n_vl15_dropped;14341435memset(pmp->data, 0, sizeof(pmp->data));14361437p->port_select = port_select;1438if (pmp->attr_mod != 0 || port_select != port)1439pmp->status |= IB_SMP_INVALID_FIELD;14401441if (cntrs.symbol_error_counter > 0xFFFFUL)1442p->symbol_error_counter = cpu_to_be16(0xFFFF);1443else1444p->symbol_error_counter =1445cpu_to_be16((u16)cntrs.symbol_error_counter);1446if (cntrs.link_error_recovery_counter > 0xFFUL)1447p->link_error_recovery_counter = 
0xFF;1448else1449p->link_error_recovery_counter =1450(u8)cntrs.link_error_recovery_counter;1451if (cntrs.link_downed_counter > 0xFFUL)1452p->link_downed_counter = 0xFF;1453else1454p->link_downed_counter = (u8)cntrs.link_downed_counter;1455if (cntrs.port_rcv_errors > 0xFFFFUL)1456p->port_rcv_errors = cpu_to_be16(0xFFFF);1457else1458p->port_rcv_errors =1459cpu_to_be16((u16) cntrs.port_rcv_errors);1460if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)1461p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);1462else1463p->port_rcv_remphys_errors =1464cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);1465if (cntrs.port_xmit_discards > 0xFFFFUL)1466p->port_xmit_discards = cpu_to_be16(0xFFFF);1467else1468p->port_xmit_discards =1469cpu_to_be16((u16)cntrs.port_xmit_discards);1470if (cntrs.local_link_integrity_errors > 0xFUL)1471cntrs.local_link_integrity_errors = 0xFUL;1472if (cntrs.excessive_buffer_overrun_errors > 0xFUL)1473cntrs.excessive_buffer_overrun_errors = 0xFUL;1474p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |1475cntrs.excessive_buffer_overrun_errors;1476if (cntrs.vl15_dropped > 0xFFFFUL)1477p->vl15_dropped = cpu_to_be16(0xFFFF);1478else1479p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);1480if (cntrs.port_xmit_data > 0xFFFFFFFFUL)1481p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);1482else1483p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);1484if (cntrs.port_rcv_data > 0xFFFFFFFFUL)1485p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);1486else1487p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);1488if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)1489p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);1490else1491p->port_xmit_packets =1492cpu_to_be32((u32)cntrs.port_xmit_packets);1493if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)1494p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);1495else1496p->port_rcv_packets =1497cpu_to_be32((u32) cntrs.port_rcv_packets);14981499return reply((struct ib_smp *) pmp);1500}15011502static int pma_get_portcounters_cong(struct 
ib_perf *pmp,1503struct ib_device *ibdev, u8 port)1504{1505/* Congestion PMA packets start at offset 24 not 64 */1506struct ib_pma_portcounters_cong *p =1507(struct ib_pma_portcounters_cong *)pmp->reserved;1508struct qib_verbs_counters cntrs;1509struct qib_ibport *ibp = to_iport(ibdev, port);1510struct qib_pportdata *ppd = ppd_from_ibp(ibp);1511struct qib_devdata *dd = dd_from_ppd(ppd);1512u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;1513u64 xmit_wait_counter;1514unsigned long flags;15151516/*1517* This check is performed only in the GET method because the1518* SET method ends up calling this anyway.1519*/1520if (!dd->psxmitwait_supported)1521pmp->status |= IB_SMP_UNSUP_METH_ATTR;1522if (port_select != port)1523pmp->status |= IB_SMP_INVALID_FIELD;15241525qib_get_counters(ppd, &cntrs);1526spin_lock_irqsave(&ppd->ibport_data.lock, flags);1527xmit_wait_counter = xmit_wait_get_value_delta(ppd);1528spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);15291530/* Adjust counters for any resets done. 
*/1531cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;1532cntrs.link_error_recovery_counter -=1533ibp->z_link_error_recovery_counter;1534cntrs.link_downed_counter -= ibp->z_link_downed_counter;1535cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;1536cntrs.port_rcv_remphys_errors -=1537ibp->z_port_rcv_remphys_errors;1538cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;1539cntrs.local_link_integrity_errors -=1540ibp->z_local_link_integrity_errors;1541cntrs.excessive_buffer_overrun_errors -=1542ibp->z_excessive_buffer_overrun_errors;1543cntrs.vl15_dropped -= ibp->z_vl15_dropped;1544cntrs.vl15_dropped += ibp->n_vl15_dropped;1545cntrs.port_xmit_data -= ibp->z_port_xmit_data;1546cntrs.port_rcv_data -= ibp->z_port_rcv_data;1547cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;1548cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;15491550memset(pmp->reserved, 0, sizeof(pmp->reserved) +1551sizeof(pmp->data));15521553/*1554* Set top 3 bits to indicate interval in picoseconds in1555* remaining bits.1556*/1557p->port_check_rate =1558cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |1559(dd->psxmitwait_check_rate &1560~(QIB_XMIT_RATE_PICO << 13)));1561p->port_adr_events = cpu_to_be64(0);1562p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);1563p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);1564p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);1565p->port_xmit_packets =1566cpu_to_be64(cntrs.port_xmit_packets);1567p->port_rcv_packets =1568cpu_to_be64(cntrs.port_rcv_packets);1569if (cntrs.symbol_error_counter > 0xFFFFUL)1570p->symbol_error_counter = cpu_to_be16(0xFFFF);1571else1572p->symbol_error_counter =1573cpu_to_be16(1574(u16)cntrs.symbol_error_counter);1575if (cntrs.link_error_recovery_counter > 0xFFUL)1576p->link_error_recovery_counter = 0xFF;1577else1578p->link_error_recovery_counter =1579(u8)cntrs.link_error_recovery_counter;1580if (cntrs.link_downed_counter > 0xFFUL)1581p->link_downed_counter = 0xFF;1582else1583p->link_downed_counter 
=1584(u8)cntrs.link_downed_counter;1585if (cntrs.port_rcv_errors > 0xFFFFUL)1586p->port_rcv_errors = cpu_to_be16(0xFFFF);1587else1588p->port_rcv_errors =1589cpu_to_be16((u16) cntrs.port_rcv_errors);1590if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)1591p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);1592else1593p->port_rcv_remphys_errors =1594cpu_to_be16(1595(u16)cntrs.port_rcv_remphys_errors);1596if (cntrs.port_xmit_discards > 0xFFFFUL)1597p->port_xmit_discards = cpu_to_be16(0xFFFF);1598else1599p->port_xmit_discards =1600cpu_to_be16((u16)cntrs.port_xmit_discards);1601if (cntrs.local_link_integrity_errors > 0xFUL)1602cntrs.local_link_integrity_errors = 0xFUL;1603if (cntrs.excessive_buffer_overrun_errors > 0xFUL)1604cntrs.excessive_buffer_overrun_errors = 0xFUL;1605p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |1606cntrs.excessive_buffer_overrun_errors;1607if (cntrs.vl15_dropped > 0xFFFFUL)1608p->vl15_dropped = cpu_to_be16(0xFFFF);1609else1610p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);16111612return reply((struct ib_smp *)pmp);1613}16141615static int pma_get_portcounters_ext(struct ib_perf *pmp,1616struct ib_device *ibdev, u8 port)1617{1618struct ib_pma_portcounters_ext *p =1619(struct ib_pma_portcounters_ext *)pmp->data;1620struct qib_ibport *ibp = to_iport(ibdev, port);1621struct qib_pportdata *ppd = ppd_from_ibp(ibp);1622u64 swords, rwords, spkts, rpkts, xwait;1623u8 port_select = p->port_select;16241625memset(pmp->data, 0, sizeof(pmp->data));16261627p->port_select = port_select;1628if (pmp->attr_mod != 0 || port_select != port) {1629pmp->status |= IB_SMP_INVALID_FIELD;1630goto bail;1631}16321633qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);16341635/* Adjust counters for any resets done. 
*/1636swords -= ibp->z_port_xmit_data;1637rwords -= ibp->z_port_rcv_data;1638spkts -= ibp->z_port_xmit_packets;1639rpkts -= ibp->z_port_rcv_packets;16401641p->port_xmit_data = cpu_to_be64(swords);1642p->port_rcv_data = cpu_to_be64(rwords);1643p->port_xmit_packets = cpu_to_be64(spkts);1644p->port_rcv_packets = cpu_to_be64(rpkts);1645p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);1646p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);1647p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);1648p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);16491650bail:1651return reply((struct ib_smp *) pmp);1652}16531654static int pma_set_portcounters(struct ib_perf *pmp,1655struct ib_device *ibdev, u8 port)1656{1657struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)1658pmp->data;1659struct qib_ibport *ibp = to_iport(ibdev, port);1660struct qib_pportdata *ppd = ppd_from_ibp(ibp);1661struct qib_verbs_counters cntrs;16621663/*1664* Since the HW doesn't support clearing counters, we save the1665* current count and subtract it from future responses.1666*/1667qib_get_counters(ppd, &cntrs);16681669if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)1670ibp->z_symbol_error_counter = cntrs.symbol_error_counter;16711672if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)1673ibp->z_link_error_recovery_counter =1674cntrs.link_error_recovery_counter;16751676if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)1677ibp->z_link_downed_counter = cntrs.link_downed_counter;16781679if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)1680ibp->z_port_rcv_errors = cntrs.port_rcv_errors;16811682if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)1683ibp->z_port_rcv_remphys_errors =1684cntrs.port_rcv_remphys_errors;16851686if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)1687ibp->z_port_xmit_discards = cntrs.port_xmit_discards;16881689if (p->counter_select & 
IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)1690ibp->z_local_link_integrity_errors =1691cntrs.local_link_integrity_errors;16921693if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)1694ibp->z_excessive_buffer_overrun_errors =1695cntrs.excessive_buffer_overrun_errors;16961697if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {1698ibp->n_vl15_dropped = 0;1699ibp->z_vl15_dropped = cntrs.vl15_dropped;1700}17011702if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)1703ibp->z_port_xmit_data = cntrs.port_xmit_data;17041705if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)1706ibp->z_port_rcv_data = cntrs.port_rcv_data;17071708if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)1709ibp->z_port_xmit_packets = cntrs.port_xmit_packets;17101711if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)1712ibp->z_port_rcv_packets = cntrs.port_rcv_packets;17131714return pma_get_portcounters(pmp, ibdev, port);1715}17161717static int pma_set_portcounters_cong(struct ib_perf *pmp,1718struct ib_device *ibdev, u8 port)1719{1720struct qib_ibport *ibp = to_iport(ibdev, port);1721struct qib_pportdata *ppd = ppd_from_ibp(ibp);1722struct qib_devdata *dd = dd_from_ppd(ppd);1723struct qib_verbs_counters cntrs;1724u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;1725int ret = 0;1726unsigned long flags;17271728qib_get_counters(ppd, &cntrs);1729/* Get counter values before we save them */1730ret = pma_get_portcounters_cong(pmp, ibdev, port);17311732if (counter_select & IB_PMA_SEL_CONG_XMIT) {1733spin_lock_irqsave(&ppd->ibport_data.lock, flags);1734ppd->cong_stats.counter = 0;1735dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,17360x0);1737spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);1738}1739if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {1740ibp->z_port_xmit_data = cntrs.port_xmit_data;1741ibp->z_port_rcv_data = cntrs.port_rcv_data;1742ibp->z_port_xmit_packets = cntrs.port_xmit_packets;1743ibp->z_port_rcv_packets = cntrs.port_rcv_packets;1744}1745if (counter_select 
& IB_PMA_SEL_CONG_ALL) {1746ibp->z_symbol_error_counter =1747cntrs.symbol_error_counter;1748ibp->z_link_error_recovery_counter =1749cntrs.link_error_recovery_counter;1750ibp->z_link_downed_counter =1751cntrs.link_downed_counter;1752ibp->z_port_rcv_errors = cntrs.port_rcv_errors;1753ibp->z_port_rcv_remphys_errors =1754cntrs.port_rcv_remphys_errors;1755ibp->z_port_xmit_discards =1756cntrs.port_xmit_discards;1757ibp->z_local_link_integrity_errors =1758cntrs.local_link_integrity_errors;1759ibp->z_excessive_buffer_overrun_errors =1760cntrs.excessive_buffer_overrun_errors;1761ibp->n_vl15_dropped = 0;1762ibp->z_vl15_dropped = cntrs.vl15_dropped;1763}17641765return ret;1766}17671768static int pma_set_portcounters_ext(struct ib_perf *pmp,1769struct ib_device *ibdev, u8 port)1770{1771struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)1772pmp->data;1773struct qib_ibport *ibp = to_iport(ibdev, port);1774struct qib_pportdata *ppd = ppd_from_ibp(ibp);1775u64 swords, rwords, spkts, rpkts, xwait;17761777qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);17781779if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)1780ibp->z_port_xmit_data = swords;17811782if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)1783ibp->z_port_rcv_data = rwords;17841785if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)1786ibp->z_port_xmit_packets = spkts;17871788if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)1789ibp->z_port_rcv_packets = rpkts;17901791if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)1792ibp->n_unicast_xmit = 0;17931794if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)1795ibp->n_unicast_rcv = 0;17961797if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)1798ibp->n_multicast_xmit = 0;17991800if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)1801ibp->n_multicast_rcv = 0;18021803return pma_get_portcounters_ext(pmp, ibdev, port);1804}18051806static int process_subn(struct ib_device *ibdev, int mad_flags,1807u8 port, struct 
ib_mad *in_mad,1808struct ib_mad *out_mad)1809{1810struct ib_smp *smp = (struct ib_smp *)out_mad;1811struct qib_ibport *ibp = to_iport(ibdev, port);1812struct qib_pportdata *ppd = ppd_from_ibp(ibp);1813int ret;18141815*out_mad = *in_mad;1816if (smp->class_version != 1) {1817smp->status |= IB_SMP_UNSUP_VERSION;1818ret = reply(smp);1819goto bail;1820}18211822ret = check_mkey(ibp, smp, mad_flags);1823if (ret) {1824u32 port_num = be32_to_cpu(smp->attr_mod);18251826/*1827* If this is a get/set portinfo, we already check the1828* M_Key if the MAD is for another port and the M_Key1829* is OK on the receiving port. This check is needed1830* to increment the error counters when the M_Key1831* fails to match on *both* ports.1832*/1833if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&1834(smp->method == IB_MGMT_METHOD_GET ||1835smp->method == IB_MGMT_METHOD_SET) &&1836port_num && port_num <= ibdev->phys_port_cnt &&1837port != port_num)1838(void) check_mkey(to_iport(ibdev, port_num), smp, 0);1839goto bail;1840}18411842switch (smp->method) {1843case IB_MGMT_METHOD_GET:1844switch (smp->attr_id) {1845case IB_SMP_ATTR_NODE_DESC:1846ret = subn_get_nodedescription(smp, ibdev);1847goto bail;1848case IB_SMP_ATTR_NODE_INFO:1849ret = subn_get_nodeinfo(smp, ibdev, port);1850goto bail;1851case IB_SMP_ATTR_GUID_INFO:1852ret = subn_get_guidinfo(smp, ibdev, port);1853goto bail;1854case IB_SMP_ATTR_PORT_INFO:1855ret = subn_get_portinfo(smp, ibdev, port);1856goto bail;1857case IB_SMP_ATTR_PKEY_TABLE:1858ret = subn_get_pkeytable(smp, ibdev, port);1859goto bail;1860case IB_SMP_ATTR_SL_TO_VL_TABLE:1861ret = subn_get_sl_to_vl(smp, ibdev, port);1862goto bail;1863case IB_SMP_ATTR_VL_ARB_TABLE:1864ret = subn_get_vl_arb(smp, ibdev, port);1865goto bail;1866case IB_SMP_ATTR_SM_INFO:1867if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {1868ret = IB_MAD_RESULT_SUCCESS |1869IB_MAD_RESULT_CONSUMED;1870goto bail;1871}1872if (ibp->port_cap_flags & IB_PORT_SM) {1873ret = IB_MAD_RESULT_SUCCESS;1874goto 
bail;1875}1876/* FALLTHROUGH */1877default:1878smp->status |= IB_SMP_UNSUP_METH_ATTR;1879ret = reply(smp);1880goto bail;1881}18821883case IB_MGMT_METHOD_SET:1884switch (smp->attr_id) {1885case IB_SMP_ATTR_GUID_INFO:1886ret = subn_set_guidinfo(smp, ibdev, port);1887goto bail;1888case IB_SMP_ATTR_PORT_INFO:1889ret = subn_set_portinfo(smp, ibdev, port);1890goto bail;1891case IB_SMP_ATTR_PKEY_TABLE:1892ret = subn_set_pkeytable(smp, ibdev, port);1893goto bail;1894case IB_SMP_ATTR_SL_TO_VL_TABLE:1895ret = subn_set_sl_to_vl(smp, ibdev, port);1896goto bail;1897case IB_SMP_ATTR_VL_ARB_TABLE:1898ret = subn_set_vl_arb(smp, ibdev, port);1899goto bail;1900case IB_SMP_ATTR_SM_INFO:1901if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {1902ret = IB_MAD_RESULT_SUCCESS |1903IB_MAD_RESULT_CONSUMED;1904goto bail;1905}1906if (ibp->port_cap_flags & IB_PORT_SM) {1907ret = IB_MAD_RESULT_SUCCESS;1908goto bail;1909}1910/* FALLTHROUGH */1911default:1912smp->status |= IB_SMP_UNSUP_METH_ATTR;1913ret = reply(smp);1914goto bail;1915}19161917case IB_MGMT_METHOD_TRAP_REPRESS:1918if (smp->attr_id == IB_SMP_ATTR_NOTICE)1919ret = subn_trap_repress(smp, ibdev, port);1920else {1921smp->status |= IB_SMP_UNSUP_METH_ATTR;1922ret = reply(smp);1923}1924goto bail;19251926case IB_MGMT_METHOD_TRAP:1927case IB_MGMT_METHOD_REPORT:1928case IB_MGMT_METHOD_REPORT_RESP:1929case IB_MGMT_METHOD_GET_RESP:1930/*1931* The ib_mad module will call us to process responses1932* before checking for other consumers.1933* Just tell the caller to process it normally.1934*/1935ret = IB_MAD_RESULT_SUCCESS;1936goto bail;19371938case IB_MGMT_METHOD_SEND:1939if (ib_get_smp_direction(smp) &&1940smp->attr_id == QIB_VENDOR_IPG) {1941ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,1942smp->data[0]);1943ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;1944} else1945ret = IB_MAD_RESULT_SUCCESS;1946goto bail;19471948default:1949smp->status |= IB_SMP_UNSUP_METHOD;1950ret = reply(smp);1951}19521953bail:1954return ret;1955}19561957static int 
process_perf(struct ib_device *ibdev, u8 port,1958struct ib_mad *in_mad,1959struct ib_mad *out_mad)1960{1961struct ib_perf *pmp = (struct ib_perf *)out_mad;1962int ret;19631964*out_mad = *in_mad;1965if (pmp->class_version != 1) {1966pmp->status |= IB_SMP_UNSUP_VERSION;1967ret = reply((struct ib_smp *) pmp);1968goto bail;1969}19701971switch (pmp->method) {1972case IB_MGMT_METHOD_GET:1973switch (pmp->attr_id) {1974case IB_PMA_CLASS_PORT_INFO:1975ret = pma_get_classportinfo(pmp, ibdev);1976goto bail;1977case IB_PMA_PORT_SAMPLES_CONTROL:1978ret = pma_get_portsamplescontrol(pmp, ibdev, port);1979goto bail;1980case IB_PMA_PORT_SAMPLES_RESULT:1981ret = pma_get_portsamplesresult(pmp, ibdev, port);1982goto bail;1983case IB_PMA_PORT_SAMPLES_RESULT_EXT:1984ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);1985goto bail;1986case IB_PMA_PORT_COUNTERS:1987ret = pma_get_portcounters(pmp, ibdev, port);1988goto bail;1989case IB_PMA_PORT_COUNTERS_EXT:1990ret = pma_get_portcounters_ext(pmp, ibdev, port);1991goto bail;1992case IB_PMA_PORT_COUNTERS_CONG:1993ret = pma_get_portcounters_cong(pmp, ibdev, port);1994goto bail;1995default:1996pmp->status |= IB_SMP_UNSUP_METH_ATTR;1997ret = reply((struct ib_smp *) pmp);1998goto bail;1999}20002001case IB_MGMT_METHOD_SET:2002switch (pmp->attr_id) {2003case IB_PMA_PORT_SAMPLES_CONTROL:2004ret = pma_set_portsamplescontrol(pmp, ibdev, port);2005goto bail;2006case IB_PMA_PORT_COUNTERS:2007ret = pma_set_portcounters(pmp, ibdev, port);2008goto bail;2009case IB_PMA_PORT_COUNTERS_EXT:2010ret = pma_set_portcounters_ext(pmp, ibdev, port);2011goto bail;2012case IB_PMA_PORT_COUNTERS_CONG:2013ret = pma_set_portcounters_cong(pmp, ibdev, port);2014goto bail;2015default:2016pmp->status |= IB_SMP_UNSUP_METH_ATTR;2017ret = reply((struct ib_smp *) pmp);2018goto bail;2019}20202021case IB_MGMT_METHOD_TRAP:2022case IB_MGMT_METHOD_GET_RESP:2023/*2024* The ib_mad module will call us to process responses2025* before checking for other consumers.2026* Just tell the 
caller to process it normally.2027*/2028ret = IB_MAD_RESULT_SUCCESS;2029goto bail;20302031default:2032pmp->status |= IB_SMP_UNSUP_METHOD;2033ret = reply((struct ib_smp *) pmp);2034}20352036bail:2037return ret;2038}20392040/**2041* qib_process_mad - process an incoming MAD packet2042* @ibdev: the infiniband device this packet came in on2043* @mad_flags: MAD flags2044* @port: the port number this packet came in on2045* @in_wc: the work completion entry for this packet2046* @in_grh: the global route header for this packet2047* @in_mad: the incoming MAD2048* @out_mad: any outgoing MAD reply2049*2050* Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not2051* interested in processing.2052*2053* Note that the verbs framework has already done the MAD sanity checks,2054* and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE2055* MADs.2056*2057* This is called by the ib_mad module.2058*/2059int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,2060struct ib_wc *in_wc, struct ib_grh *in_grh,2061struct ib_mad *in_mad, struct ib_mad *out_mad)2062{2063int ret;20642065switch (in_mad->mad_hdr.mgmt_class) {2066case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:2067case IB_MGMT_CLASS_SUBN_LID_ROUTED:2068ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);2069goto bail;20702071case IB_MGMT_CLASS_PERF_MGMT:2072ret = process_perf(ibdev, port, in_mad, out_mad);2073goto bail;20742075default:2076ret = IB_MAD_RESULT_SUCCESS;2077}20782079bail:2080return ret;2081}20822083static void send_handler(struct ib_mad_agent *agent,2084struct ib_mad_send_wc *mad_send_wc)2085{2086ib_free_send_mad(mad_send_wc->send_buf);2087}20882089static void xmit_wait_timer_func(unsigned long opaque)2090{2091struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;2092struct qib_devdata *dd = dd_from_ppd(ppd);2093unsigned long flags;2094u8 status;20952096spin_lock_irqsave(&ppd->ibport_data.lock, flags);2097if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {2098status = 
dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);2099if (status == IB_PMA_SAMPLE_STATUS_DONE) {2100/* save counter cache */2101cache_hw_sample_counters(ppd);2102ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;2103} else2104goto done;2105}2106ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);2107dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);2108done:2109spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);2110mod_timer(&ppd->cong_stats.timer, jiffies + HZ);2111}21122113int qib_create_agents(struct qib_ibdev *dev)2114{2115struct qib_devdata *dd = dd_from_dev(dev);2116struct ib_mad_agent *agent;2117struct qib_ibport *ibp;2118int p;2119int ret;21202121for (p = 0; p < dd->num_pports; p++) {2122ibp = &dd->pport[p].ibport_data;2123agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,2124NULL, 0, send_handler,2125NULL, NULL);2126if (IS_ERR(agent)) {2127ret = PTR_ERR(agent);2128goto err;2129}21302131/* Initialize xmit_wait structure */2132dd->pport[p].cong_stats.counter = 0;2133init_timer(&dd->pport[p].cong_stats.timer);2134dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;2135dd->pport[p].cong_stats.timer.data =2136(unsigned long)(&dd->pport[p]);2137dd->pport[p].cong_stats.timer.expires = 0;2138add_timer(&dd->pport[p].cong_stats.timer);21392140ibp->send_agent = agent;2141}21422143return 0;21442145err:2146for (p = 0; p < dd->num_pports; p++) {2147ibp = &dd->pport[p].ibport_data;2148if (ibp->send_agent) {2149agent = ibp->send_agent;2150ibp->send_agent = NULL;2151ib_unregister_mad_agent(agent);2152}2153}21542155return ret;2156}21572158void qib_free_agents(struct qib_ibdev *dev)2159{2160struct qib_devdata *dd = dd_from_dev(dev);2161struct ib_mad_agent *agent;2162struct qib_ibport *ibp;2163int p;21642165for (p = 0; p < dd->num_pports; p++) {2166ibp = &dd->pport[p].ibport_data;2167if (ibp->send_agent) {2168agent = ibp->send_agent;2169ibp->send_agent = NULL;2170ib_unregister_mad_agent(agent);2171}2172if (ibp->sm_ah) 
{2173ib_destroy_ah(&ibp->sm_ah->ibah);2174ibp->sm_ah = NULL;2175}2176if (dd->pport[p].cong_stats.timer.data)2177del_timer_sync(&dd->pport[p].cong_stats.timer);2178}2179}218021812182