Path: blob/master/drivers/infiniband/hw/qib/qib_diag.c
/*
 * Copyright (c) 2010 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains support for diagnostic functions.  It is accessed by
 * opening the qib_diag device, normally minor number 129.  Diagnostic use
 * of the QLogic_IB chip may render the chip or board unusable until the
 * driver is unloaded, or in some cases, until the system is rebooted.
 *
 * Accesses to the chip through this interface are not similar to going
 * through the /sys/bus/pci resource mmap interface.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#include "qib.h"
#include "qib_common.h"

/*
 * Each client that opens the diag device must read then write
 * offset 0, to prevent lossage from random cat or od. diag_state
 * sequences this "handshake".
 */
enum diag_state { UNUSED = 0, OPENED, INIT, READY };
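
/*
 * For reference, a minimal user-space sketch of this handshake (illustrative
 * only, not part of the driver).  The device node path is an assumption about
 * the naming of the per-unit node created by qib_diag_add() below, and
 * pread()/pwrite() are used so that both accesses hit offset 0:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int diag_open_ready(const char *path)	(e.g. "/dev/ipath_diag0")
 *	{
 *		uint64_t tok;
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		(read then write the 8 bytes at offset 0: OPENED -> INIT -> READY)
 *		if (pread(fd, &tok, sizeof(tok), 0) != sizeof(tok) ||
 *		    pwrite(fd, &tok, sizeof(tok), 0) != sizeof(tok)) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 *
 * Note that the same process must perform all further accesses; qib_diag_read()
 * and qib_diag_write() reject callers whose pid differs from the opener's.
 */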

/* State for an individual client. PID so children cannot abuse handshake */
static struct qib_diag_client {
	struct qib_diag_client *next;
	struct qib_devdata *dd;
	pid_t pid;
	enum diag_state state;
} *client_pool;

/*
 * Get a client struct. Recycled if possible, else kmalloc.
 * Must be called with qib_mutex held
 */
static struct qib_diag_client *get_client(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	dc = client_pool;
	if (dc)
		/* got from pool; remove it and use */
		client_pool = dc->next;
	else
		/* None in pool, alloc and init */
		dc = kmalloc(sizeof *dc, GFP_KERNEL);

	if (dc) {
		dc->next = NULL;
		dc->dd = dd;
		dc->pid = current->pid;
		dc->state = OPENED;
	}
	return dc;
}

/*
 * Return to pool. Must be called with qib_mutex held
 */
static void return_client(struct qib_diag_client *dc)
{
	struct qib_devdata *dd = dc->dd;
	struct qib_diag_client *tdc, *rdc;

	rdc = NULL;
	if (dc == dd->diag_client) {
		dd->diag_client = dc->next;
		rdc = dc;
	} else {
		tdc = dc->dd->diag_client;
		while (tdc) {
			if (dc == tdc->next) {
				tdc->next = dc->next;
				rdc = dc;
				break;
			}
			tdc = tdc->next;
		}
	}
	if (rdc) {
		rdc->state = UNUSED;
		rdc->dd = NULL;
		rdc->pid = 0;
		rdc->next = client_pool;
		client_pool = rdc;
	}
}

static int qib_diag_open(struct inode *in, struct file *fp);
static int qib_diag_release(struct inode *in, struct file *fp);
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off);
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off);

static const struct file_operations diag_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diag_write,
	.read = qib_diag_read,
	.open = qib_diag_open,
	.release = qib_diag_release,
	.llseek = default_llseek,
};

static atomic_t diagpkt_count = ATOMIC_INIT(0);
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_device;

static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
				 size_t count, loff_t *off);

static const struct file_operations diagpkt_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diagpkt_write,
	.llseek = noop_llseek,
};

int qib_diag_add(struct qib_devdata *dd)
{
	char name[16];
	int ret = 0;

	if (atomic_inc_return(&diagpkt_count) == 1) {
		ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
				    &diagpkt_file_ops, &diagpkt_cdev,
				    &diagpkt_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
	ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
			    &diag_file_ops, &dd->diag_cdev,
			    &dd->diag_device);
done:
	return ret;
}

static void qib_unregister_observers(struct qib_devdata *dd);

void qib_diag_remove(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	if (atomic_dec_and_test(&diagpkt_count))
		qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);

	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);

	/*
	 * Return all diag_clients of this device. There should be none,
	 * as we are "guaranteed" that no clients are still open
	 */
	while (dd->diag_client)
		return_client(dd->diag_client);

	/* Now clean up all unused client structs */
	while (client_pool) {
		dc = client_pool;
		client_pool = dc->next;
		kfree(dc);
	}
	/* Clean up observer list */
	qib_unregister_observers(dd);
}

/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
 *
 * @dd: the qlogic_ib device
 * @offs: the offset in chip-space
 * @cntp: Pointer to max (byte) count for transfer starting at offset
 * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
 * mapping. It is needed because with the use of PAT for control of
 * write-combining, the logically contiguous address-space of the chip
 * may be split into virtually non-contiguous spaces, with different
 * attributes, which are then mapped to contiguous physical space
 * based from the first BAR.
 *
 * The code below makes the same assumptions as were made in
 * init_chip_wc_pat() (qib_init.c), copied here:
 * Assumes chip address space looks like:
 *		- kregs + sregs + cregs + uregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *	or:
 *		- kregs + sregs + cregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *		- uregs
 *
 * If cntp is non-NULL, returns how many bytes from offset can be accessed
 * Returns 0 if the offset is not mapped.
 */
static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
				       u32 *cntp)
{
	u32 kreglen;
	u32 snd_bottom, snd_lim = 0;
	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
	u32 __iomem *map = NULL;
	u32 cnt = 0;
	u32 tot4k, offs4k;

	/* First, simplest case, offset is within the first map. */
	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
	if (offset < kreglen) {
		map = krb32 + (offset / sizeof(u32));
		cnt = kreglen - offset;
		goto mapped;
	}

	/*
	 * Next check for user regs, the next most common case,
	 * and a cheap check because if they are not in the first map
	 * they are last in chip.
	 */
	if (dd->userbase) {
		/* If user regs mapped, they are after send, so set limit. */
		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
		if (!dd->piovl15base)
			snd_lim = dd->uregbase;
		krb32 = (u32 __iomem *)dd->userbase;
		if (offset >= dd->uregbase && offset < ulim) {
			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
			cnt = ulim - offset;
			goto mapped;
		}
	}

	/*
	 * Lastly, check for offset within Send Buffers.
	 * This is gnarly because struct devdata is deliberately vague
	 * about things like 7322 VL15 buffers, and we are not in
	 * chip-specific code here, so should not make many assumptions.
	 * The one we _do_ make is that the only chip that has more sndbufs
	 * than we admit is the 7322, and it has userregs above that, so
	 * we know the snd_lim.
	 */
	/* Assume 2K buffers are first. */
	snd_bottom = dd->pio2k_bufbase;
	if (snd_lim == 0) {
		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
		snd_lim = snd_bottom + tot2k;
	}
	/* If 4k buffers exist, account for them by bumping
	 * appropriate limit.
	 */
	tot4k = dd->piobcnt4k * dd->align4k;
	offs4k = dd->piobufbase >> 32;
	if (dd->piobcnt4k) {
		if (snd_bottom > offs4k)
			snd_bottom = offs4k;
		else {
			/* 4k above 2k. Bump snd_lim, if needed */
			if (!dd->userbase || dd->piovl15base)
				snd_lim = offs4k + tot4k;
		}
	}
	/*
	 * Judgement call: can we ignore the space between SendBuffs and
	 * UserRegs, where we would like to see vl15 buffs, but not more?
	 */
	if (offset >= snd_bottom && offset < snd_lim) {
		offset -= snd_bottom;
		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
		cnt = snd_lim - offset;
	}

	if (!map && offs4k && dd->piovl15base) {
		snd_lim = offs4k + tot4k + 2 * dd->align4k;
		if (offset >= (offs4k + tot4k) && offset < snd_lim) {
			map = (u32 __iomem *)dd->piovl15base +
				((offset - (offs4k + tot4k)) / sizeof(u32));
			cnt = snd_lim - offset;
		}
	}

mapped:
	if (cntp)
		*cntp = cnt;
	return map;
}
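
/*
 * For orientation, this is how the access helpers below consume the mapping
 * (an illustrative sketch of the calling pattern, not additional driver code):
 *
 *	u32 limit;
 *	u32 __iomem *p = qib_remap_ioaddr32(dd, regoffs, &limit);
 *
 *	if (!p || !limit)
 *		-> offset not mapped (or no bytes available): fail with -EINVAL
 *	else
 *		-> transfer at most 'limit' bytes starting at p, using the
 *		   32-bit accessors unless both offset and length are
 *		   64-bit aligned
 */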

/*
 * qib_read_umem64 - read a 64-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy (multiple of 32 bits)
 *
 * This function also localizes all chip memory accesses.
 * The copy should be written such that we read full cacheline packets
 * from the chip.  This is usually used for a single qword
 *
 * NOTE: This assumes the chip address is 64-bit aligned.
 */
static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data = readq(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(u64))) {
			ret = -EFAULT;
			goto bail;
		}
		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem64 - write a 64-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: the number of bytes to copy (multiple of 32 bits)
 *
 * This is usually used for a single qword
 * NOTE: This assumes the chip address is 64-bit aligned.
 */

static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data;
		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writeq(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_read_umem32 - read a 32-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy
 *
 * read 32 bit values, not 64 bit; for memories that only
 * support 32 bit reads; usually a single dword.
 */
static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u32 data = readl(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem32 - write a 32-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: number of bytes to copy
 *
 * write 32 bit values, not 64 bit; for memories that only
 * support 32 bit writes; usually a single dword.
 */

static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	while (reg_addr < reg_end) {
		u32 data;

		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writel(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

static int qib_diag_open(struct inode *in, struct file *fp)
{
	int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
	struct qib_devdata *dd;
	struct qib_diag_client *dc;
	int ret;

	mutex_lock(&qib_mutex);

	dd = qib_lookup(unit);

	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
	    !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}

	dc = get_client(dd);
	if (!dc) {
		ret = -ENOMEM;
		goto bail;
	}
	dc->next = dd->diag_client;
	dd->diag_client = dc;
	fp->private_data = dc;
	ret = 0;
bail:
	mutex_unlock(&qib_mutex);

	return ret;
}

/**
 * qib_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: qib_diag_xpkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct qib_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	ssize_t ret = 0;

	if (count != sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}
	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	dd = qib_lookup(dp.unit);
	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & QIB_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp.version != _DIAG_XPKT_VERS) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
		ret = -EINVAL;
		goto bail;
	}
	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}
	if (!dp.port || dp.port > dd->num_pports) {
		ret = -EINVAL;
		goto bail;
	}
	ppd = &dd->pport[dp.port - 1];

	/* need total length before first word written */
	/* +1 word is for the qword padding */
	plen = sizeof(u32) + dp.len;
	clen = dp.len >> 2;

	if ((plen + 4) > ppd->ibmaxlen) {
		ret = -EINVAL;
		goto bail;	/* before writing pbc */
	}
	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
			    "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;

	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
	if (!piobuf) {
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));

	/* disable header check on pbufn for this packet */
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);

	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
		qib_flush_wc();
		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	} else
		qib_pio_copy(piobuf + 2, tmpbuf, clen);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	/*
	 * Ensure buffer is written to the chip, then re-enable
	 * header checks (if supported by chip).  The txchk
	 * code will ensure it is seen by the chip before returning.
	 */
	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
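
/*
 * A hedged user-space sketch of driving the packet interface above
 * (illustrative only).  Field names follow their use in qib_diagpkt_write();
 * the exact struct qib_diag_xpkt layout and _DIAG_XPKT_VERS come from
 * qib_common.h.  The device node path and the unit/port values chosen here
 * are assumptions for the example.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include "qib_common.h"
 *
 *	int send_diag_pkt(const void *payload, uint32_t len)
 *	{
 *		struct qib_diag_xpkt dp;
 *		int fd = open("/dev/ipath_diagpkt", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&dp, 0, sizeof(dp));
 *		dp.version = _DIAG_XPKT_VERS;
 *		dp.unit = 0;				(first HCA)
 *		dp.port = 1;				(ports are 1-based)
 *		dp.len = len;				(bytes, multiple of 4)
 *		dp.data = (uint64_t)(uintptr_t)payload;
 *		dp.pbc_wd = 0;				(0: driver derives PBC length)
 *		if (write(fd, &dp, sizeof(dp)) != sizeof(dp)) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */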

static int qib_diag_release(struct inode *in, struct file *fp)
{
	mutex_lock(&qib_mutex);
	return_client(fp->private_data);
	fp->private_data = NULL;
	mutex_unlock(&qib_mutex);
	return 0;
}

/*
 * Chip-specific code calls to register its interest in
 * a specific range.
 */
struct diag_observer_list_elt {
	struct diag_observer_list_elt *next;
	const struct diag_observer *op;
};

int qib_register_observer(struct qib_devdata *dd,
			  const struct diag_observer *op)
{
	struct diag_observer_list_elt *olp;
	int ret = -EINVAL;

	if (!dd || !op)
		goto bail;
	ret = -ENOMEM;
	olp = vmalloc(sizeof *olp);
	if (!olp) {
		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
		goto bail;
	}
	if (olp) {
		unsigned long flags;

		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp->op = op;
		olp->next = dd->diag_observer_list;
		dd->diag_observer_list = olp;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		ret = 0;
	}
bail:
	return ret;
}
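
/*
 * A sketch of how chip-specific code might use this (illustrative; the
 * authoritative struct diag_observer and hook prototype live in qib.h, and
 * MY_CSR_OFFS is a hypothetical register offset).  The hook is invoked with
 * qib_diag_trans_lock held, so it must not sleep.
 *
 *	static int my_csr_hook(struct qib_devdata *dd,
 *			       const struct diag_observer *op,
 *			       u32 offs, u64 *data, u64 mask, int only_32)
 *	{
 *		if (mask)
 *			... write access: apply *data under mask ...
 *		else
 *			... read access: fill in *data ...
 *		return only_32 ? 4 : 8;		(bytes handled)
 *	}
 *
 *	static const struct diag_observer my_csr_observer = {
 *		.hook   = my_csr_hook,
 *		.bottom = MY_CSR_OFFS,
 *		.top    = MY_CSR_OFFS + 8 - 1,
 *	};
 *
 *	ret = qib_register_observer(dd, &my_csr_observer);
 */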

/* Remove all registered observers when device is closed */
static void qib_unregister_observers(struct qib_devdata *dd)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp = dd->diag_observer_list;
	while (olp) {
		/* Pop one observer, let go of lock */
		dd->diag_observer_list = olp->next;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		vfree(olp);
		/* try again. */
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp = dd->diag_observer_list;
	}
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
}

/*
 * Find the observer, if any, for the specified address. Initial implementation
 * is simple stack of observers. This must be called with diag transaction
 * lock held.
 */
static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
						     u32 addr)
{
	struct diag_observer_list_elt *olp;
	const struct diag_observer *op = NULL;

	olp = dd->diag_observer_list;
	while (olp) {
		op = olp->op;
		if (addr >= op->bottom && addr <= op->top)
			break;
		olp = olp->next;
	}
	if (!olp)
		op = NULL;

	return op;
}

static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	void __iomem *kreg_base;
	ssize_t ret;

	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	kreg_base = dd->kregbase;

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY && (*off || count != 8))
		ret = -EINVAL;	/* prevent cat /dev/qib_diag* */
	else {
		unsigned long flags;
		u64 data64 = 0;
		int use_32;
		const struct diag_observer *op;

		use_32 = (count % 8) || (*off % 8);
		ret = -1;
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		/*
		 * Check for observer on this address range.
		 * we only support a single 32 or 64-bit read
		 * via observer, currently.
		 */
		op = diag_get_observer(dd, *off);
		if (op) {
			u32 offset = *off;
			ret = op->hook(dd, op, offset, &data64, 0, use_32);
		}
		/*
		 * We need to release lock before any copy_to_user(),
		 * whether implicit in qib_read_umem* or explicit below.
		 */
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit rd
				 */
				ret = qib_read_umem32(dd, data, (u32) *off,
						      count);
			else
				ret = qib_read_umem64(dd, data, (u32) *off,
						      count);
		} else if (ret == count) {
			/* Below finishes case where observer existed */
			ret = copy_to_user(data, &data64, use_32 ?
					   sizeof(u32) : sizeof(u64));
			if (ret)
				ret = -EFAULT;
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == OPENED)
			dc->state = INIT;
	}
bail:
	return ret;
}

static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	void __iomem *kreg_base;
	ssize_t ret;

	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	kreg_base = dd->kregbase;

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY &&
		 ((*off || count != 8) || dc->state != INIT))
		/* No writes except second-step of init seq */
		ret = -EINVAL;	/* before any other write allowed */
	else {
		unsigned long flags;
		const struct diag_observer *op = NULL;
		int use_32 = (count % 8) || (*off % 8);

		/*
		 * Check for observer on this address range.
		 * We only support a single 32 or 64-bit write
		 * via observer, currently.  This helps, because
		 * we would otherwise have to jump through hoops
		 * to make "diag transaction" meaningful when we
		 * cannot do a copy_from_user while holding the lock.
		 */
		if (count == 4 || count == 8) {
			u64 data64;
			u32 offset = *off;
			ret = copy_from_user(&data64, data, count);
			if (ret) {
				ret = -EFAULT;
				goto bail;
			}
			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
			op = diag_get_observer(dd, *off);
			if (op)
				ret = op->hook(dd, op, offset, &data64, ~0Ull,
					       use_32);
			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		}

		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit write
				 */
				ret = qib_write_umem32(dd, (u32) *off, data,
						       count);
			else
				ret = qib_write_umem64(dd, (u32) *off, data,
						       count);
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == INIT)
			dc->state = READY; /* all read/write OK now */
	}
bail:
	return ret;
}
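
/*
 * Once the handshake described near diag_state (above) has completed, aligned
 * pread()/pwrite() of 4 or 8 bytes at a BAR0-relative chip offset reach the
 * chip via qib_read_umem32/64() and qib_write_umem32/64(), or via an observer
 * hook if one covers the offset.  An illustrative user-space read, assuming
 * 'fd' came from the handshake sketch and 'offs' is 8-byte aligned:
 *
 *	uint64_t read_csr64(int fd, uint32_t offs)
 *	{
 *		uint64_t v = 0;
 *
 *		if (pread(fd, &v, sizeof(v), offs) != sizeof(v))
 *			return ~0ULL;
 *		return v;
 *	}
 */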