Path: drivers/infiniband/hw/ipath/ipath_mad.c
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int recv_subn_get_nodedescription(struct ib_smp *smp,
					 struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

struct nodeinfo {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));

static int recv_subn_get_nodeinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
	struct ipath_devdata *dd = to_idev(ibdev)->dd;
	u32 vendor, majrev, minrev;

	/* GUID 0 is illegal */
	if (smp->attr_mod || (dd->ipath_guid == 0))
		smp->status |= IB_SMP_INVALID_FIELD;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	/*
	 * XXX The num_ports value will need a layer function to get
	 * the value if we ever have more than one IB port on a chip.
	 * We will also need to get the GUID for the port.
	 */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
	nip->node_guid = dd->ipath_guid;
	nip->port_guid = dd->ipath_guid;
	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
	majrev = dd->ipath_majrev;
	minrev = dd->ipath_minrev;
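	/* Chip revision: major revision in the upper 16 bits, minor in the lower 16. */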
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->ipath_vendorid;
	nip->vendor_id[0] = IPATH_SRC_OUI_1;
	nip->vendor_id[1] = IPATH_SRC_OUI_2;
	nip->vendor_id[2] = IPATH_SRC_OUI_3;

	return reply(smp);
}

static int recv_subn_get_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	/*
	 * We only support one GUID for now.  If this changes, the
	 * portinfo.guid_cap field needs to be updated too.
	 */
	if (startgx == 0) {
		__be64 g = to_idev(ibdev)->dd->ipath_guid;
		if (g == 0)
			/* GUID 0 is illegal */
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			/* The first is a copy of the read-only HW GUID. */
			*p = g;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

static int get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);
	dd = dev->dd;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    dev->mkeyprot == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = dd->ipath_lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dd->ipath_link_width_enabled;
	pip->link_width_supported = dd->ipath_link_width_supported;
	pip->link_width_active = dd->ipath_link_width_active;
	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values. */
	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;

	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
		(get_linkdowndefaultstate(dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
		dd->ipath_link_speed_enabled;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:		/* oops, something is wrong */
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/* pip->vl_arb_high_cap; // only one VL */
	/* pip->vl_arb_low_cap; // only one VL */
	/* InitTypeReply = 0 */
	/* our mtu cap depends on whether 4K MTU enabled or not */
	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations =
		cpu_to_be16((ipath_get_cr_errpkey(dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
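	/* RespTimeValue is encoded as 4.096 usec * 2^N, so 3 gives 32.768 usec. */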
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(dd) << 4) |
		get_overrunthreshold(dd);
	/* pip->max_credit_hint; */
	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	/* always a kernel port, no locking needed */
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_get_npkeys(dev->dd);

		get_pkeys(dev->dd, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	/* The only GUID we support is the first read-only entry. */
	return recv_subn_get_guidinfo(smp, ibdev);
}

/**
 * set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret, ore;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	dd = dev->dd;
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (dd->ipath_lid != lid ||
	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			lwe = dd->ipath_link_width_supported;
		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
			goto err;
		set_link_width_enabled(dd, lwe);
	}

	/* Allow 2.5 or 5.0 Gbs. */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		if (lse == 15)
			lse = dd->ipath_link_speed_supported;
		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
			goto err;
		set_link_speed_enabled(dd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		if (set_linkdowndefaultstate(dd, 1))
			goto err;
		break;
	case 2: /* POLL */
		if (set_linkdowndefaultstate(dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	dev->vl_high_limit = pip->vl_high_limit;

	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		if (!ipath_mtu4096)
			goto err;
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_set_mtu(dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0 */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * Hardware counter can't be reset so snapshot and subtract
	 * later.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(dd, (ore & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = IPATH_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_set_linkstate(dd, lstate);
		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
				     IPATH_LINKACTIVE, 1000);
		break;
	case IB_PORT_ARMED:
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	/* always a kernel port, no locking needed */
	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned i, n = ipath_get_npkeys(dev->dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return recv_subn_get_pkeytable(smp, ibdev);
}

#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)

struct ib_perf {
	u8 base_version;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	__be16 status;
	__be16 unused;
	__be64 tid;
	__be16 attr_id;
	__be16 resv;
	__be32 attr_mod;
	u8 reserved[40];
	u8 data[192];
} __attribute__ ((packed));

struct ib_pma_classportinfo {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	u8 reserved[3];
	u8 resp_time_value;	/* only lower 5 bits */
	union ib_gid redirect_gid;
	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
	__be16 redirect_lid;
	__be16 redirect_pkey;
	__be32 redirect_qp;	/* only lower 24 bits */
	__be32 redirect_qkey;
	union ib_gid trap_gid;
	__be32 trap_tc_sl_fl;	/* 8, 4, 20 bits respectively */
	__be16 trap_lid;
	__be16 trap_pkey;
	__be32 trap_hl_qp;	/* 8, 24 bits respectively */
	__be32 trap_qkey;
} __attribute__ ((packed));

struct ib_pma_portsamplescontrol {
	u8 opcode;
	u8 port_select;
	u8 tick;
	u8 counter_width;	/* only lower 3 bits */
	__be32 counter_mask0_9;	/* 2, 10 * 3, bits */
	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
	u8 sample_mechanisms;
	u8 sample_status;	/* only lower 2 bits */
	__be64 option_mask;
	__be64 vendor_mask;
	__be32 sample_start;
	__be32 sample_interval;
	__be16 tag;
	__be16 counter_select[15];
} __attribute__ ((packed));

struct ib_pma_portsamplesresult {
	__be16 tag;
	__be16 sample_status;	/* only lower 2 bits */
	__be32 counter[15];
} __attribute__ ((packed));
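/* PortSamplesResultExtended: as above, plus extended_width and 64-bit counters. */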
struct ib_pma_portsamplesresult_ext {
	__be16 tag;
	__be16 sample_status;	/* only lower 2 bits */
	__be32 extended_width;	/* only upper 2 bits */
	__be64 counter[15];
} __attribute__ ((packed));

struct ib_pma_portcounters {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;
	__be16 symbol_error_counter;
	u8 link_error_recovery_counter;
	u8 link_downed_counter;
	__be16 port_rcv_errors;
	__be16 port_rcv_remphys_errors;
	__be16 port_rcv_switch_relay_errors;
	__be16 port_xmit_discards;
	u8 port_xmit_constraint_errors;
	u8 port_rcv_constraint_errors;
	u8 reserved1;
	u8 lli_ebor_errors;	/* 4, 4, bits */
	__be16 reserved2;
	__be16 vl15_dropped;
	__be32 port_xmit_data;
	__be32 port_rcv_data;
	__be32 port_xmit_packets;
	__be32 port_rcv_packets;
} __attribute__ ((packed));

#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)

struct ib_pma_portcounters_ext {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;
	__be32 reserved1;
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_packets;
	__be64 port_rcv_packets;
	__be64 port_unicast_xmit_packets;
	__be64 port_unicast_rcv_packets;
	__be64 port_multicast_xmit_packets;
	__be64 port_multicast_rcv_packets;
} __attribute__ ((packed));

#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)

static int recv_pma_get_classportinfo(struct ib_perf *pmp)
{
	struct ib_pma_classportinfo *p =
		(struct ib_pma_classportinfo *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->attr_mod != 0)
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
	p->cap_mask = cpu_to_be16(1 << 8);
	p->base_version = 1;
	p->class_version = 1;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
	 * sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

/*
 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
 * We support 5 counters which only count the mandatory quantities.
 */
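/*
 * With the five supported counters enabled, COUNTER_MASK(1, 0) through
 * COUNTER_MASK(1, 4) are 1 << 27, 1 << 24, 1 << 21, 1 << 18 and 1 << 15,
 * so COUNTER_MASK0_9 below evaluates to cpu_to_be32(0x09248000).
 */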
#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
				    COUNTER_MASK(1, 1) | \
				    COUNTER_MASK(1, 2) | \
				    COUNTER_MASK(1, 3) | \
				    COUNTER_MASK(1, 4))

static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->status |= IB_SMP_INVALID_FIELD;
	/*
	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
	 * intervals are counted in ticks.  Since we use Linux timers, that
	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
	 * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
	 * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
	 * have hardware support for delaying packets.
	 */
	if (crp->cr_psstat)
		p->tick = dev->dd->ipath_link_speed_active - 1;
	else
		p->tick = 250;	/* 1 usec. */
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		p->sample_status = dev->pma_sample_status;
	p->sample_start = cpu_to_be32(dev->pma_sample_start);
	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
	p->tag = cpu_to_be16(dev->pma_tag);
	p->counter_select[0] = dev->pma_counter_select[0];
	p->counter_select[1] = dev->pma_counter_select[1];
	p->counter_select[2] = dev->pma_counter_select[2];
	p->counter_select[3] = dev->pma_counter_select[3];
	p->counter_select[4] = dev->pma_counter_select[4];
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 status;
	int ret;

	if (pmp->attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
		dev->pma_sample_start = be32_to_cpu(p->sample_start);
		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
		dev->pma_tag = be16_to_cpu(p->tag);
		dev->pma_counter_select[0] = p->counter_select[0];
		dev->pma_counter_select[1] = p->counter_select[1];
		dev->pma_counter_select[2] = p->counter_select[2];
		dev->pma_counter_select[3] = p->counter_select[3];
		dev->pma_counter_select[4] = p->counter_select[4];
		if (crp->cr_psstat) {
			ipath_write_creg(dev->dd, crp->cr_psinterval,
					 dev->pma_sample_interval);
			ipath_write_creg(dev->dd, crp->cr_psstart,
					 dev->pma_sample_start);
		} else
			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

static u64 get_counter(struct ipath_ibdev *dev,
		       struct ipath_cregs const *crp,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = (crp->cr_psxmitdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
			dev->ipath_sword;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = (crp->cr_psrcvdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
			dev->ipath_rword;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = (crp->cr_psxmitpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
			dev->ipath_spkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = (crp->cr_psrcvpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
			dev->ipath_rpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = (crp->cr_psxmitwaitcount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
			dev->ipath_xmit_wait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
					  struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be32(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
					      struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	/* 64 bits */
	p->extended_width = cpu_to_be32(0x80000000);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be64(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portcounters(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;
	u8 port_select = p->port_select;

	ipath_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
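	/*
	 * The hardware counters are never cleared; a PortCounters Set only
	 * snapshots the current values into the z_* baselines subtracted here.
	 */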
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		dev->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		dev->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= dev->z_vl15_dropped;
	cntrs.vl15_dropped += dev->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= dev->z_port_xmit_data;
	rwords -= dev->z_port_rcv_data;
	spkts -= dev->z_port_xmit_packets;
	rpkts -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->status |= IB_SMP_INVALID_FIELD;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portcounters(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		dev->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		dev->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		dev->n_vl15_dropped = 0;
		dev->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	return recv_pma_get_portcounters(pmp, ibdev, port);
}

static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout &&
	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		dev->mkeyprot = 0;
	}

	/*
	 * M_Key checking depends on
	 * Portinfo:M_Key_protect_bits
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      dev->mkeyprot >= 2))) {
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: Generate a trap notice. */
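		/* Consume the MAD without sending a response while the lease runs. */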
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_TRAP_REPRESS:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port_num,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_perf *pmp = (struct ib_perf *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->class_version != 1) {
		pmp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		pmp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * ipath_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port_num: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		      struct ib_wc *in_wc, struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port_num,
				   in_mad, out_mad);
		goto bail;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port_num, in_mad, out_mad);
		goto bail;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}