/* drivers/infiniband/hw/ipath/ipath_init_chip.c */
/*1* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.2* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.3*4* This software is available to you under a choice of one of two5* licenses. You may choose to be licensed under the terms of the GNU6* General Public License (GPL) Version 2, available from the file7* COPYING in the main directory of this source tree, or the8* OpenIB.org BSD license below:9*10* Redistribution and use in source and binary forms, with or11* without modification, are permitted provided that the following12* conditions are met:13*14* - Redistributions of source code must retain the above15* copyright notice, this list of conditions and the following16* disclaimer.17*18* - Redistributions in binary form must reproduce the above19* copyright notice, this list of conditions and the following20* disclaimer in the documentation and/or other materials21* provided with the distribution.22*23* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,24* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF25* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND26* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS27* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN28* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN29* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE30* SOFTWARE.31*/3233#include <linux/pci.h>34#include <linux/netdevice.h>35#include <linux/slab.h>36#include <linux/vmalloc.h>3738#include "ipath_kernel.h"39#include "ipath_common.h"4041/*42* min buffers we want to have per port, after driver43*/44#define IPATH_MIN_USER_PORT_BUFCNT 74546/*47* Number of ports we are configured to use (to allow for more pio48* buffers per port, etc.) 
Zero means use chip value.49*/50static ushort ipath_cfgports;5152module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);53MODULE_PARM_DESC(cfgports, "Set max number of ports to use");5455/*56* Number of buffers reserved for driver (verbs and layered drivers.)57* Initialized based on number of PIO buffers if not set via module interface.58* The problem with this is that it's global, but we'll use different59* numbers for different chip types.60*/61static ushort ipath_kpiobufs;6263static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);6465module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,66&ipath_kpiobufs, S_IWUSR | S_IRUGO);67MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");6869/**70* create_port0_egr - allocate the eager TID buffers71* @dd: the infinipath device72*73* This code is now quite different for user and kernel, because74* the kernel uses skb's, for the accelerated network performance.75* This is the kernel (port0) version.76*77* Allocate the eager TID buffers and program them into infinipath.78* We use the network layer alloc_skb() allocator to allocate the79* memory, and either use the buffers as is for things like verbs80* packets, or pass the buffers up to the ipath layered driver and81* thence the network layer, replacing them as we do so (see82* ipath_rcv_layer()).83*/84static int create_port0_egr(struct ipath_devdata *dd)85{86unsigned e, egrcnt;87struct ipath_skbinfo *skbinfo;88int ret;8990egrcnt = dd->ipath_p0_rcvegrcnt;9192skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);93if (skbinfo == NULL) {94ipath_dev_err(dd, "allocation error for eager TID "95"skb array\n");96ret = -ENOMEM;97goto bail;98}99for (e = 0; e < egrcnt; e++) {100/*101* This is a bit tricky in that we allocate extra102* space for 2 bytes of the 14 byte ethernet header.103* These two bytes are passed in the ipath header so104* the rest of the data is word aligned. 
We allocate105* 4 bytes so that the data buffer stays word aligned.106* See ipath_kreceive() for more details.107*/108skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL);109if (!skbinfo[e].skb) {110ipath_dev_err(dd, "SKB allocation error for "111"eager TID %u\n", e);112while (e != 0)113dev_kfree_skb(skbinfo[--e].skb);114vfree(skbinfo);115ret = -ENOMEM;116goto bail;117}118}119/*120* After loop above, so we can test non-NULL to see if ready121* to use at receive, etc.122*/123dd->ipath_port0_skbinfo = skbinfo;124125for (e = 0; e < egrcnt; e++) {126dd->ipath_port0_skbinfo[e].phys =127ipath_map_single(dd->pcidev,128dd->ipath_port0_skbinfo[e].skb->data,129dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);130dd->ipath_f_put_tid(dd, e + (u64 __iomem *)131((char __iomem *) dd->ipath_kregbase +132dd->ipath_rcvegrbase),133RCVHQ_RCV_TYPE_EAGER,134dd->ipath_port0_skbinfo[e].phys);135}136137ret = 0;138139bail:140return ret;141}142143static int bringup_link(struct ipath_devdata *dd)144{145u64 val, ibc;146int ret = 0;147148/* hold IBC in reset */149dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;150ipath_write_kreg(dd, dd->ipath_kregs->kr_control,151dd->ipath_control);152153/*154* set initial max size pkt IBC will send, including ICRC; it's the155* PIO buffer size in dwords, less 1; also see ipath_set_mtu()156*/157val = (dd->ipath_ibmaxlen >> 2) + 1;158ibc = val << dd->ibcc_mpl_shift;159160/* flowcontrolwatermark is in units of KBytes */161ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;162/*163* How often flowctrl sent. More or less in usecs; balance against164* watermark value, so that in theory senders always get a flow165* control update in time to not let the IB link go idle.166*/167ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;168/* max error tolerance */169ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;170/* use "real" buffer space for */171ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;172/* IB credit flow control. 
*/173ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;174/* initially come up waiting for TS1, without sending anything. */175dd->ipath_ibcctrl = ibc;176/*177* Want to start out with both LINKCMD and LINKINITCMD in NOP178* (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that179* to stay a NOP. Flag that we are disabled, for the (unlikely)180* case that some recovery path is trying to bring the link up181* before we are ready.182*/183ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<184INFINIPATH_IBCC_LINKINITCMD_SHIFT;185dd->ipath_flags |= IPATH_IB_LINK_DISABLED;186ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",187(unsigned long long) ibc);188ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);189190// be sure chip saw it191val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);192193ret = dd->ipath_f_bringup_serdes(dd);194195if (ret)196dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "197"not usable\n");198else {199/* enable IBC */200dd->ipath_control |= INFINIPATH_C_LINKENABLE;201ipath_write_kreg(dd, dd->ipath_kregs->kr_control,202dd->ipath_control);203}204205return ret;206}207208static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)209{210struct ipath_portdata *pd = NULL;211212pd = kzalloc(sizeof(*pd), GFP_KERNEL);213if (pd) {214pd->port_dd = dd;215pd->port_cnt = 1;216/* The port 0 pkey table is used by the layer interface. 
*/217pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;218pd->port_seq_cnt = 1;219}220return pd;221}222223static int init_chip_first(struct ipath_devdata *dd)224{225struct ipath_portdata *pd;226int ret = 0;227u64 val;228229spin_lock_init(&dd->ipath_kernel_tid_lock);230spin_lock_init(&dd->ipath_user_tid_lock);231spin_lock_init(&dd->ipath_sendctrl_lock);232spin_lock_init(&dd->ipath_uctxt_lock);233spin_lock_init(&dd->ipath_sdma_lock);234spin_lock_init(&dd->ipath_gpio_lock);235spin_lock_init(&dd->ipath_eep_st_lock);236spin_lock_init(&dd->ipath_sdepb_lock);237mutex_init(&dd->ipath_eep_lock);238239/*240* skip cfgports stuff because we are not allocating memory,241* and we don't want problems if the portcnt changed due to242* cfgports. We do still check and report a difference, if243* not same (should be impossible).244*/245dd->ipath_f_config_ports(dd, ipath_cfgports);246if (!ipath_cfgports)247dd->ipath_cfgports = dd->ipath_portcnt;248else if (ipath_cfgports <= dd->ipath_portcnt) {249dd->ipath_cfgports = ipath_cfgports;250ipath_dbg("Configured to use %u ports out of %u in chip\n",251dd->ipath_cfgports, ipath_read_kreg32(dd,252dd->ipath_kregs->kr_portcnt));253} else {254dd->ipath_cfgports = dd->ipath_portcnt;255ipath_dbg("Tried to configured to use %u ports; chip "256"only supports %u\n", ipath_cfgports,257ipath_read_kreg32(dd,258dd->ipath_kregs->kr_portcnt));259}260/*261* Allocate full portcnt array, rather than just cfgports, because262* cleanup iterates across all possible ports.263*/264dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt,265GFP_KERNEL);266267if (!dd->ipath_pd) {268ipath_dev_err(dd, "Unable to allocate portdata array, "269"failing\n");270ret = -ENOMEM;271goto done;272}273274pd = create_portdata0(dd);275if (!pd) {276ipath_dev_err(dd, "Unable to allocate portdata for port "277"0, failing\n");278ret = -ENOMEM;279goto done;280}281dd->ipath_pd[0] = pd;282283dd->ipath_rcvtidcnt =284ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);285dd->ipath_rcvtidbase 
=286ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);287dd->ipath_rcvegrcnt =288ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);289dd->ipath_rcvegrbase =290ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);291dd->ipath_palign =292ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);293dd->ipath_piobufbase =294ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);295val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);296dd->ipath_piosize2k = val & ~0U;297dd->ipath_piosize4k = val >> 32;298if (dd->ipath_piosize4k == 0 && ipath_mtu4096)299ipath_mtu4096 = 0; /* 4KB not supported by this chip */300dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;301val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);302dd->ipath_piobcnt2k = val & ~0U;303dd->ipath_piobcnt4k = val >> 32;304dd->ipath_pio2kbase =305(u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +306(dd->ipath_piobufbase & 0xffffffff));307if (dd->ipath_piobcnt4k) {308dd->ipath_pio4kbase = (u32 __iomem *)309(((char __iomem *) dd->ipath_kregbase) +310(dd->ipath_piobufbase >> 32));311/*312* 4K buffers take 2 pages; we use roundup just to be313* paranoid; we calculate it once here, rather than on314* ever buf allocate315*/316dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,317dd->ipath_palign);318ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "319"(%x aligned)\n",320dd->ipath_piobcnt2k, dd->ipath_piosize2k,321dd->ipath_pio2kbase, dd->ipath_piobcnt4k,322dd->ipath_piosize4k, dd->ipath_pio4kbase,323dd->ipath_4kalign);324}325else ipath_dbg("%u 2k piobufs @ %p\n",326dd->ipath_piobcnt2k, dd->ipath_pio2kbase);327328done:329return ret;330}331332/**333* init_chip_reset - re-initialize after a reset, or enable334* @dd: the infinipath device335*336* sanity check at least some of the values after reset, and337* ensure no receive or transmit (explicitly, in case reset338* failed339*/340static int init_chip_reset(struct ipath_devdata *dd)341{342u32 rtmp;343int i;344unsigned long flags;345346/*347* 
ensure chip does no sends or receives, tail updates, or348* pioavail updates while we re-initialize349*/350dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);351for (i = 0; i < dd->ipath_portcnt; i++) {352clear_bit(dd->ipath_r_portenable_shift + i,353&dd->ipath_rcvctrl);354clear_bit(dd->ipath_r_intravail_shift + i,355&dd->ipath_rcvctrl);356}357ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,358dd->ipath_rcvctrl);359360spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);361dd->ipath_sendctrl = 0U; /* no sdma, etc */362ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);363ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);364spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);365366ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);367368rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);369if (rtmp != dd->ipath_rcvtidcnt)370dev_info(&dd->pcidev->dev, "tidcnt was %u before "371"reset, now %u, using original\n",372dd->ipath_rcvtidcnt, rtmp);373rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);374if (rtmp != dd->ipath_rcvtidbase)375dev_info(&dd->pcidev->dev, "tidbase was %u before "376"reset, now %u, using original\n",377dd->ipath_rcvtidbase, rtmp);378rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);379if (rtmp != dd->ipath_rcvegrcnt)380dev_info(&dd->pcidev->dev, "egrcnt was %u before "381"reset, now %u, using original\n",382dd->ipath_rcvegrcnt, rtmp);383rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);384if (rtmp != dd->ipath_rcvegrbase)385dev_info(&dd->pcidev->dev, "egrbase was %u before "386"reset, now %u, using original\n",387dd->ipath_rcvegrbase, rtmp);388389return 0;390}391392static int init_pioavailregs(struct ipath_devdata *dd)393{394int ret;395396dd->ipath_pioavailregs_dma = dma_alloc_coherent(397&dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,398GFP_KERNEL);399if (!dd->ipath_pioavailregs_dma) {400ipath_dev_err(dd, "failed to allocate PIOavail reg area "401"in 
memory\n");402ret = -ENOMEM;403goto done;404}405406/*407* we really want L2 cache aligned, but for current CPUs of408* interest, they are the same.409*/410dd->ipath_statusp = (u64 *)411((char *)dd->ipath_pioavailregs_dma +412((2 * L1_CACHE_BYTES +413dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));414/* copy the current value now that it's really allocated */415*dd->ipath_statusp = dd->_ipath_status;416/*417* setup buffer to hold freeze msg, accessible to apps,418* following statusp419*/420dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];421/* and its length */422dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);423424ret = 0;425426done:427return ret;428}429430/**431* init_shadow_tids - allocate the shadow TID array432* @dd: the infinipath device433*434* allocate the shadow TID array, so we can ipath_munlock previous435* entries. It may make more sense to move the pageshadow to the436* port data structure, so we only allocate memory for ports actually437* in use, since we at 8k per port, now.438*/439static void init_shadow_tids(struct ipath_devdata *dd)440{441struct page **pages;442dma_addr_t *addrs;443444pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *445sizeof(struct page *));446if (!pages) {447ipath_dev_err(dd, "failed to allocate shadow page * "448"array, no expected sends!\n");449dd->ipath_pageshadow = NULL;450return;451}452453addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *454sizeof(dma_addr_t));455if (!addrs) {456ipath_dev_err(dd, "failed to allocate shadow dma handle "457"array, no expected sends!\n");458vfree(pages);459dd->ipath_pageshadow = NULL;460return;461}462463dd->ipath_pageshadow = pages;464dd->ipath_physshadow = addrs;465}466467static void enable_chip(struct ipath_devdata *dd, int reinit)468{469u32 val;470u64 rcvmask;471unsigned long flags;472int i;473474if (!reinit)475init_waitqueue_head(&ipath_state_wait);476477ipath_write_kreg(dd, 
dd->ipath_kregs->kr_rcvctrl,478dd->ipath_rcvctrl);479480spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);481/* Enable PIO send, and update of PIOavail regs to memory. */482dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |483INFINIPATH_S_PIOBUFAVAILUPD;484485/*486* Set the PIO avail update threshold to host memory487* on chips that support it.488*/489if (dd->ipath_pioupd_thresh)490dd->ipath_sendctrl |= dd->ipath_pioupd_thresh491<< INFINIPATH_S_UPDTHRESH_SHIFT;492ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);493ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);494spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);495496/*497* Enable kernel ports' receive and receive interrupt.498* Other ports done as user opens and inits them.499*/500rcvmask = 1ULL;501dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |502(rcvmask << dd->ipath_r_intravail_shift);503if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))504dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);505506ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,507dd->ipath_rcvctrl);508509/*510* now ready for use. 
this should be cleared whenever we511* detect a reset, or initiate one.512*/513dd->ipath_flags |= IPATH_INITTED;514515/*516* Init our shadow copies of head from tail values,517* and write head values to match.518*/519val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);520ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);521522/* Initialize so we interrupt on next packet received */523ipath_write_ureg(dd, ur_rcvhdrhead,524dd->ipath_rhdrhead_intr_off |525dd->ipath_pd[0]->port_head, 0);526527/*528* by now pioavail updates to memory should have occurred, so529* copy them into our working/shadow registers; this is in530* case something went wrong with abort, but mostly to get the531* initial values of the generation bit correct.532*/533for (i = 0; i < dd->ipath_pioavregs; i++) {534__le64 pioavail;535536/*537* Chip Errata bug 6641; even and odd qwords>3 are swapped.538*/539if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))540pioavail = dd->ipath_pioavailregs_dma[i ^ 1];541else542pioavail = dd->ipath_pioavailregs_dma[i];543/*544* don't need to worry about ipath_pioavailkernel here545* because we will call ipath_chg_pioavailkernel() later546* in initialization, to busy out buffers as needed547*/548dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);549}550/* can get counters, stats, etc. */551dd->ipath_flags |= IPATH_PRESENT;552}553554static int init_housekeeping(struct ipath_devdata *dd, int reinit)555{556char boardn[40];557int ret = 0;558559/*560* have to clear shadow copies of registers at init that are561* not otherwise set here, or all kinds of bizarre things562* happen with driver on chip reset563*/564dd->ipath_rcvhdrsize = 0;565566/*567* Don't clear ipath_flags as 8bit mode was set before568* entering this func. 
However, we do set the linkstate to569* unknown, so we can watch for a transition.570* PRESENT is set because we want register reads to work,571* and the kernel infrastructure saw it in config space;572* We clear it if we have failures.573*/574dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;575dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |576IPATH_LINKDOWN | IPATH_LINKINIT);577578ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");579dd->ipath_revision =580ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);581582/*583* set up fundamental info we need to use the chip; we assume584* if the revision reg and these regs are OK, we don't need to585* special case the rest586*/587dd->ipath_sregbase =588ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);589dd->ipath_cregbase =590ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);591dd->ipath_uregbase =592ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);593ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "594"cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,595dd->ipath_uregbase, dd->ipath_cregbase);596if ((dd->ipath_revision & 0xffffffff) == 0xffffffff597|| (dd->ipath_sregbase & 0xffffffff) == 0xffffffff598|| (dd->ipath_cregbase & 0xffffffff) == 0xffffffff599|| (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {600ipath_dev_err(dd, "Register read failures from chip, "601"giving up initialization\n");602dd->ipath_flags &= ~IPATH_PRESENT;603ret = -ENODEV;604goto done;605}606607608/* clear diagctrl register, in case diags were running and crashed */609ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0);610611/* clear the initial reset flag, in case first driver load */612ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,613INFINIPATH_E_RESET);614615ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",616(unsigned long long) dd->ipath_revision,617dd->ipath_pcirev);618619if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &620INFINIPATH_R_SOFTWARE_MASK) != 
IPATH_CHIP_SWVERSION) {621ipath_dev_err(dd, "Driver only handles version %d, "622"chip swversion is %d (%llx), failng\n",623IPATH_CHIP_SWVERSION,624(int)(dd->ipath_revision >>625INFINIPATH_R_SOFTWARE_SHIFT) &626INFINIPATH_R_SOFTWARE_MASK,627(unsigned long long) dd->ipath_revision);628ret = -ENOSYS;629goto done;630}631dd->ipath_majrev = (u8) ((dd->ipath_revision >>632INFINIPATH_R_CHIPREVMAJOR_SHIFT) &633INFINIPATH_R_CHIPREVMAJOR_MASK);634dd->ipath_minrev = (u8) ((dd->ipath_revision >>635INFINIPATH_R_CHIPREVMINOR_SHIFT) &636INFINIPATH_R_CHIPREVMINOR_MASK);637dd->ipath_boardrev = (u8) ((dd->ipath_revision >>638INFINIPATH_R_BOARDID_SHIFT) &639INFINIPATH_R_BOARDID_MASK);640641ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);642643snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),644"ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "645"SW Compat %u\n",646IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,647(unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &648INFINIPATH_R_ARCH_MASK,649dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,650(unsigned)(dd->ipath_revision >>651INFINIPATH_R_SOFTWARE_SHIFT) &652INFINIPATH_R_SOFTWARE_MASK);653654ipath_dbg("%s", dd->ipath_boardversion);655656if (ret)657goto done;658659if (reinit)660ret = init_chip_reset(dd);661else662ret = init_chip_first(dd);663664done:665return ret;666}667668static void verify_interrupt(unsigned long opaque)669{670struct ipath_devdata *dd = (struct ipath_devdata *) opaque;671672if (!dd)673return; /* being torn down */674675/*676* If we don't have any interrupts, let the user know and677* don't bother checking again.678*/679if (dd->ipath_int_counter == 0) {680if (!dd->ipath_f_intr_fallback(dd))681dev_err(&dd->pcidev->dev, "No interrupts detected, "682"not usable.\n");683else /* re-arm the timer to see if fallback works */684mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);685} else686ipath_cdbg(VERBOSE, "%u interrupts at timer 
check\n",687dd->ipath_int_counter);688}689690/**691* ipath_init_chip - do the actual initialization sequence on the chip692* @dd: the infinipath device693* @reinit: reinitializing, so don't allocate new memory694*695* Do the actual initialization sequence on the chip. This is done696* both from the init routine called from the PCI infrastructure, and697* when we reset the chip, or detect that it was reset internally,698* or it's administratively re-enabled.699*700* Memory allocation here and in called routines is only done in701* the first case (reinit == 0). We have to be careful, because even702* without memory allocation, we need to re-write all the chip registers703* TIDs, etc. after the reset or enable has completed.704*/705int ipath_init_chip(struct ipath_devdata *dd, int reinit)706{707int ret = 0;708u32 kpiobufs, defkbufs;709u32 piobufs, uports;710u64 val;711struct ipath_portdata *pd;712gfp_t gfp_flags = GFP_USER | __GFP_COMP;713714ret = init_housekeeping(dd, reinit);715if (ret)716goto done;717718/*719* we ignore most issues after reporting them, but have to specially720* handle hardware-disabled chips.721*/722if (ret == 2) {723/* unique error, known to ipath_init_one */724ret = -EPERM;725goto done;726}727728/*729* We could bump this to allow for full rcvegrcnt + rcvtidcnt,730* but then it no longer nicely fits power of two, and since731* we now use routines that backend onto __get_free_pages, the732* rest would be wasted.733*/734dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);735ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,736dd->ipath_rcvhdrcnt);737738/*739* Set up the shadow copies of the piobufavail registers,740* which we compare against the chip registers for now, and741* the in memory DMA'ed copies of the registers. 
This has to742* be done early, before we calculate lastport, etc.743*/744piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;745/*746* calc number of pioavail registers, and save it; we have 2747* bits per buffer.748*/749dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)750/ (sizeof(u64) * BITS_PER_BYTE / 2);751uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;752if (piobufs > 144)753defkbufs = 32 + dd->ipath_pioreserved;754else755defkbufs = 16 + dd->ipath_pioreserved;756757if (ipath_kpiobufs && (ipath_kpiobufs +758(uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {759int i = (int) piobufs -760(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);761if (i < 1)762i = 1;763dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "764"%d for kernel leaves too few for %d user ports "765"(%d each); using %u\n", ipath_kpiobufs,766piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);767/*768* shouldn't change ipath_kpiobufs, because could be769* different for different devices...770*/771kpiobufs = i;772} else if (ipath_kpiobufs)773kpiobufs = ipath_kpiobufs;774else775kpiobufs = defkbufs;776dd->ipath_lastport_piobuf = piobufs - kpiobufs;777dd->ipath_pbufsport =778uports ? 
dd->ipath_lastport_piobuf / uports : 0;779/* if not an even divisor, some user ports get extra buffers */780dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -781(dd->ipath_pbufsport * uports);782if (dd->ipath_ports_extrabuf)783ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "784"ports <= %u\n", dd->ipath_pbufsport,785dd->ipath_ports_extrabuf);786dd->ipath_lastpioindex = 0;787dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;788/* ipath_pioavailshadow initialized earlier */789ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "790"each for %u user ports\n", kpiobufs,791piobufs, dd->ipath_pbufsport, uports);792ret = dd->ipath_f_early_init(dd);793if (ret) {794ipath_dev_err(dd, "Early initialization failure\n");795goto done;796}797798/*799* Early_init sets rcvhdrentsize and rcvhdrsize, so this must be800* done after early_init.801*/802dd->ipath_hdrqlast =803dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);804ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,805dd->ipath_rcvhdrentsize);806ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,807dd->ipath_rcvhdrsize);808809if (!reinit) {810ret = init_pioavailregs(dd);811init_shadow_tids(dd);812if (ret)813goto done;814}815816ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,817dd->ipath_pioavailregs_phys);818819/*820* this is to detect s/w errors, which the h/w works around by821* ignoring the low 6 bits of address, if it wasn't aligned.822*/823val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);824if (val != dd->ipath_pioavailregs_phys) {825ipath_dev_err(dd, "Catastrophic software error, "826"SendPIOAvailAddr written as %lx, "827"read back as %llx\n",828(unsigned long) dd->ipath_pioavailregs_phys,829(unsigned long long) val);830ret = -EINVAL;831goto done;832}833834ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);835836/*837* make sure we are not in freeze, and PIO send enabled, so838* writes to pbc happen839*/840ipath_write_kreg(dd, 
dd->ipath_kregs->kr_hwerrmask, 0ULL);841ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,842~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);843ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);844845/*846* before error clears, since we expect serdes pll errors during847* this, the first time after reset848*/849if (bringup_link(dd)) {850dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");851ret = -ENETDOWN;852goto done;853}854855/*856* clear any "expected" hwerrs from reset and/or initialization857* clear any that aren't enabled (at least this once), and then858* set the enable mask859*/860dd->ipath_f_init_hwerrors(dd);861ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,862~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);863ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,864dd->ipath_hwerrmask);865866/* clear all */867ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);868/* enable errors that are masked, at least this first time. */869ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,870~dd->ipath_maskederrs);871dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */872dd->ipath_errormask =873ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);874/* clear any interrupts up to this point (ints still not enabled) */875ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);876877dd->ipath_f_tidtemplate(dd);878879/*880* Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing881* re-init, the simplest way to handle this is to free882* existing, and re-allocate.883* Need to re-create rest of port 0 portdata as well.884*/885pd = dd->ipath_pd[0];886if (reinit) {887struct ipath_portdata *npd;888889/*890* Alloc and init new ipath_portdata for port0,891* Then free old pd. 
Could lead to fragmentation, but also892* makes later support for hot-swap easier.893*/894npd = create_portdata0(dd);895if (npd) {896ipath_free_pddata(dd, pd);897dd->ipath_pd[0] = npd;898pd = npd;899} else {900ipath_dev_err(dd, "Unable to allocate portdata"901" for port 0, failing\n");902ret = -ENOMEM;903goto done;904}905}906ret = ipath_create_rcvhdrq(dd, pd);907if (!ret)908ret = create_port0_egr(dd);909if (ret) {910ipath_dev_err(dd, "failed to allocate kernel port's "911"rcvhdrq and/or egr bufs\n");912goto done;913}914else915enable_chip(dd, reinit);916917/* after enable_chip, so pioavailshadow setup */918ipath_chg_pioavailkernel(dd, 0, piobufs, 1);919920/*921* Cancel any possible active sends from early driver load.922* Follows early_init because some chips have to initialize923* PIO buffers in early_init to avoid false parity errors.924* After enable and ipath_chg_pioavailkernel so we can safely925* enable pioavail updates and PIOENABLE; packets are now926* ready to go out.927*/928ipath_cancel_sends(dd, 1);929930if (!reinit) {931/*932* Used when we close a port, for DMA already in flight933* at close.934*/935dd->ipath_dummy_hdrq = dma_alloc_coherent(936&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,937&dd->ipath_dummy_hdrq_phys,938gfp_flags);939if (!dd->ipath_dummy_hdrq) {940dev_info(&dd->pcidev->dev,941"Couldn't allocate 0x%lx bytes for dummy hdrq\n",942dd->ipath_pd[0]->port_rcvhdrq_size);943/* fallback to just 0'ing */944dd->ipath_dummy_hdrq_phys = 0UL;945}946}947948/*949* cause retrigger of pending interrupts ignored during init,950* even if we had errors951*/952ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);953954if (!dd->ipath_stats_timer_active) {955/*956* first init, or after an admin disable/enable957* set up stats retrieval timer, even if we had errors958* in last portion of setup959*/960init_timer(&dd->ipath_stats_timer);961dd->ipath_stats_timer.function = ipath_get_faststats;962dd->ipath_stats_timer.data = (unsigned long) dd;963/* every 
5 seconds; */964dd->ipath_stats_timer.expires = jiffies + 5 * HZ;965/* takes ~16 seconds to overflow at full IB 4x bandwdith */966add_timer(&dd->ipath_stats_timer);967dd->ipath_stats_timer_active = 1;968}969970/* Set up SendDMA if chip supports it */971if (dd->ipath_flags & IPATH_HAS_SEND_DMA)972ret = setup_sdma(dd);973974/* Set up HoL state */975init_timer(&dd->ipath_hol_timer);976dd->ipath_hol_timer.function = ipath_hol_event;977dd->ipath_hol_timer.data = (unsigned long)dd;978dd->ipath_hol_state = IPATH_HOL_UP;979980done:981if (!ret) {982*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;983if (!dd->ipath_f_intrsetup(dd)) {984/* now we can enable all interrupts from the chip */985ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,986-1LL);987/* force re-interrupt of any pending interrupts. */988ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,9890ULL);990/* chip is usable; mark it as initialized */991*dd->ipath_statusp |= IPATH_STATUS_INITTED;992993/*994* setup to verify we get an interrupt, and fallback995* to an alternate if necessary and possible996*/997if (!reinit) {998init_timer(&dd->ipath_intrchk_timer);999dd->ipath_intrchk_timer.function =1000verify_interrupt;1001dd->ipath_intrchk_timer.data =1002(unsigned long) dd;1003}1004dd->ipath_intrchk_timer.expires = jiffies + HZ/2;1005add_timer(&dd->ipath_intrchk_timer);1006} else1007ipath_dev_err(dd, "No interrupts enabled, couldn't "1008"setup interrupt address\n");10091010if (dd->ipath_cfgports > ipath_stats.sps_nports)1011/*1012* sps_nports is a global, so, we set it to1013* the highest number of ports of any of the1014* chips we find; we never decrement it, at1015* least for now. Since this might have changed1016* over disable/enable or prior to reset, always1017* do the check and potentially adjust.1018*/1019ipath_stats.sps_nports = dd->ipath_cfgports;1020} else1021ipath_dbg("Failed (%d) to initialize chip\n", ret);10221023/* if ret is non-zero, we probably should do some cleanup1024here... 
*/1025return ret;1026}10271028static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)1029{1030struct ipath_devdata *dd;1031unsigned long flags;1032unsigned short val;1033int ret;10341035ret = ipath_parse_ushort(str, &val);10361037spin_lock_irqsave(&ipath_devs_lock, flags);10381039if (ret < 0)1040goto bail;10411042if (val == 0) {1043ret = -EINVAL;1044goto bail;1045}10461047list_for_each_entry(dd, &ipath_dev_list, ipath_list) {1048if (dd->ipath_kregbase)1049continue;1050if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -1051(dd->ipath_cfgports *1052IPATH_MIN_USER_PORT_BUFCNT)))1053{1054ipath_dev_err(1055dd,1056"Allocating %d PIO bufs for kernel leaves "1057"too few for %d user ports (%d each)\n",1058val, dd->ipath_cfgports - 1,1059IPATH_MIN_USER_PORT_BUFCNT);1060ret = -EINVAL;1061goto bail;1062}1063dd->ipath_lastport_piobuf =1064dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;1065}10661067ipath_kpiobufs = val;1068ret = 0;1069bail:1070spin_unlock_irqrestore(&ipath_devs_lock, flags);10711072return ret;1073}107410751076