Path: blob/master/drivers/char/xillybus/xillybus_core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 */

#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"
#include "xillybus_class.h"

MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

#define fpga_msg_ctrl_reg 0x0008
#define fpga_dma_control_reg 0x0020
#define fpga_dma_bufno_reg 0x0024
#define fpga_dma_bufaddr_lowaddr_reg 0x0028
#define fpga_dma_bufaddr_highaddr_reg 0x002c
#define fpga_buf_ctrl_reg 0x0030
#define fpga_buf_offset_reg 0x0034
#define fpga_endian_reg 0x0040

#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] = "xillybus";

static struct workqueue_struct *xillybus_wq;

/*
 * Locking scheme: Mutexes protect invocations of character device methods.
 * If both locks are taken, wr_mutex is taken first, rd_mutex second.
 *
 * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
 * buffers' end_offset fields against changes made by IRQ handler (and in
 * theory, other file request handlers, but the mutex handles that). Nothing
 * else.
 * They are held for short direct memory manipulations. Needless to say,
 * no mutex locking is allowed when a spinlock is held.
 *
 * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
 *
 * register_mutex is endpoint-specific, and is held when non-atomic
 * register operations are performed. wr_mutex and rd_mutex may be
 * held when register_mutex is taken, but none of the spinlocks. Note that
 * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
 * which are unrelated to buf_offset_reg, since they are harmless.
 *
 * Blocking on the wait queues is allowed with mutexes held, but not with
 * spinlocks.
 *
 * Only interruptible blocking is allowed on mutexes and wait queues.
 *
 * All in all, the locking order goes (with skips allowed, of course):
 * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
 */

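/*
 * FPGA-to-host messages arrive in the message buffer as pairs of 32-bit
 * words. As the parsing below reflects, the first word carries the opcode
 * (bits 31:24), a "last message" flag (bit 22), a buffer number
 * (bits 21:12), a channel number (bits 11:1) and a direction bit (bit 0).
 * The second word carries 28 bits of payload plus a 4-bit rolling counter
 * (bits 31:28), which is matched against msg_counter for synchronization.
 */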
static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{
	int opcode;
	int msg_channel, msg_bufno, msg_data, msg_dir;

	opcode = (buf[0] >> 24) & 0xff;
	msg_dir = buf[0] & 1;
	msg_channel = (buf[0] >> 1) & 0x7ff;
	msg_bufno = (buf[0] >> 12) & 0x3ff;
	msg_data = buf[1] & 0xfffffff;

	dev_warn(endpoint->dev,
		 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
		 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}

/*
 * xillybus_isr assumes the interrupt is allocated exclusively to it,
 * which is the natural case with MSI and several other hardware-oriented
 * interrupts. Sharing is not allowed.
 */

irqreturn_t xillybus_isr(int irq, void *data)
{
	struct xilly_endpoint *ep = data;
	u32 *buf;
	unsigned int buf_size;
	int i;
	int opcode;
	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
	struct xilly_channel *channel;

	buf = ep->msgbuf_addr;
	buf_size = ep->msg_buf_size/sizeof(u32);

	dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
				ep->msg_buf_size, DMA_FROM_DEVICE);

	for (i = 0; i < buf_size; i += 2) {
		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
			malformed_message(ep, &buf[i]);
			dev_warn(ep->dev,
				 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
				 ((buf[i+1] >> 28) & 0xf),
				 ep->msg_counter,
				 i/2);

			if (++ep->failed_messages > 10) {
				dev_err(ep->dev,
					"Lost sync with interrupt messages. Stopping.\n");
			} else {
				dma_sync_single_for_device(ep->dev,
							   ep->msgbuf_dma_addr,
							   ep->msg_buf_size,
							   DMA_FROM_DEVICE);

				iowrite32(0x01, /* Message NACK */
					  ep->registers + fpga_msg_ctrl_reg);
			}
			return IRQ_HANDLED;
		} else if (buf[i] & (1 << 22)) /* Last message */
			break;
	}

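	/*
	 * Falling through the loop without having met a "last message"
	 * flag means the message buffer is malformed: report it and
	 * return without ACKing.
	 */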
	if (i >= buf_size) {
		dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
		return IRQ_HANDLED;
	}

	buf_size = i + 2;

	for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
		opcode = (buf[i] >> 24) & 0xff;

		msg_dir = buf[i] & 1;
		msg_channel = (buf[i] >> 1) & 0x7ff;
		msg_bufno = (buf[i] >> 12) & 0x3ff;
		msg_data = buf[i+1] & 0xfffffff;

		switch (opcode) {
		case XILLYMSG_OPCODE_RELEASEBUF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0)) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_dir) { /* Write channel */
				if (msg_bufno >= channel->num_wr_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}
				spin_lock(&channel->wr_spinlock);
				channel->wr_buffers[msg_bufno]->end_offset =
					msg_data;
				channel->wr_fpga_buf_idx = msg_bufno;
				channel->wr_empty = 0;
				channel->wr_sleepy = 0;
				spin_unlock(&channel->wr_spinlock);

				wake_up_interruptible(&channel->wr_wait);

			} else {
				/* Read channel */

				if (msg_bufno >= channel->num_rd_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}

				spin_lock(&channel->rd_spinlock);
				channel->rd_fpga_buf_idx = msg_bufno;
				channel->rd_full = 0;
				spin_unlock(&channel->rd_spinlock);

				wake_up_interruptible(&channel->rd_wait);
				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);
			}

			break;
		case XILLYMSG_OPCODE_NONEMPTY:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->wr_supports_nonempty) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_bufno >= channel->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			spin_lock(&channel->wr_spinlock);
			if (msg_bufno == channel->wr_host_buf_idx)
				channel->wr_ready = 1;
			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_ready_wait);

			break;
		case XILLYMSG_OPCODE_QUIESCEACK:
			ep->idtlen = msg_data;
			wake_up_interruptible(&ep->ep_wait);

			break;
		case XILLYMSG_OPCODE_FIFOEOF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			channel = ep->channels[msg_channel];
			spin_lock(&channel->wr_spinlock);
			channel->wr_eof = msg_bufno;
			channel->wr_sleepy = 0;

			channel->wr_hangup = channel->wr_empty &&
				(channel->wr_host_buf_idx == msg_bufno);

			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_wait);

			break;
		case XILLYMSG_OPCODE_FATAL_ERROR:
			ep->fatal_error = 1;
			wake_up_interruptible(&ep->ep_wait); /* For select() */
			dev_err(ep->dev,
				"FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
			break;
		default:
			malformed_message(ep, &buf[i]);
			break;
		}
	}

	dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
				   ep->msg_buf_size, DMA_FROM_DEVICE);

	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
	ep->failed_messages = 0;
	iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(xillybus_isr);

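/*
 * xillybus_isr is exported because the bus-specific glue drivers own the
 * interrupt setup. A sketch of the expected hookup (the exact call site
 * lives in the glue code, e.g. xillybus_of.c / xillybus_pcie.c):
 *
 *	rc = devm_request_irq(dev, irq, xillybus_isr, 0,
 *			      xillyname, endpoint);
 */
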
/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

static void xillybus_autoflush(struct work_struct *work);

struct xilly_alloc_state {
	void *salami;
	int left_of_salami;
	int nbuffer;
	enum dma_data_direction direction;
	u32 regdirection;
};

static void xilly_unmap(void *ptr)
{
	struct xilly_mapping *data = ptr;

	dma_unmap_single(data->device, data->dma_addr,
			 data->size, data->direction);

	kfree(ptr);
}

static int xilly_map_single(struct xilly_endpoint *ep,
			    void *ptr,
			    size_t size,
			    int direction,
			    dma_addr_t *ret_dma_handle
	)
{
	dma_addr_t addr;
	struct xilly_mapping *this;

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	addr = dma_map_single(ep->dev, ptr, size, direction);

	if (dma_mapping_error(ep->dev, addr)) {
		kfree(this);
		return -ENODEV;
	}

	this->device = ep->dev;
	this->dma_addr = addr;
	this->size = size;
	this->direction = direction;

	*ret_dma_handle = addr;

	return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
}

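/*
 * Buffers are carved out of "salami" chunks: whole power-of-two page
 * allocations, sliced into equally sized DMA buffers. Each slice is
 * DMA-mapped and its bus address is handed to the FPGA through the
 * bufaddr registers below. A NULL @buffers argument means the single
 * message buffer is being set up rather than a channel's data buffers.
 */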
static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
				 struct xilly_alloc_state *s,
				 struct xilly_buffer **buffers,
				 int bufnum, int bytebufsize)
{
	int i, rc;
	dma_addr_t dma_addr;
	struct device *dev = ep->dev;
	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

	if (buffers) { /* Not the message buffer */
		this_buffer = devm_kcalloc(dev, bufnum,
					   sizeof(struct xilly_buffer),
					   GFP_KERNEL);
		if (!this_buffer)
			return -ENOMEM;
	}

	for (i = 0; i < bufnum; i++) {
		/*
		 * Buffers are expected in descending size order, so there
		 * is either enough space for this buffer or none at all.
		 */

		if ((s->left_of_salami < bytebufsize) &&
		    (s->left_of_salami > 0)) {
			dev_err(ep->dev,
				"Corrupt buffer allocation in IDT. Aborting.\n");
			return -ENODEV;
		}

		if (s->left_of_salami == 0) {
			int allocorder, allocsize;

			allocsize = PAGE_SIZE;
			allocorder = 0;
			while (bytebufsize > allocsize) {
				allocsize *= 2;
				allocorder++;
			}

			s->salami = (void *) devm_get_free_pages(
				dev,
				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
				allocorder);
			if (!s->salami)
				return -ENOMEM;

			s->left_of_salami = allocsize;
		}

		rc = xilly_map_single(ep, s->salami,
				      bytebufsize, s->direction,
				      &dma_addr);
		if (rc)
			return rc;

		iowrite32((u32) (dma_addr & 0xffffffff),
			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
			  ep->registers + fpga_dma_bufaddr_highaddr_reg);

		if (buffers) { /* Not the message buffer */
			this_buffer->addr = s->salami;
			this_buffer->dma_addr = dma_addr;
			buffers[i] = this_buffer++;

			iowrite32(s->regdirection | s->nbuffer++,
				  ep->registers + fpga_dma_bufno_reg);
		} else {
			ep->msgbuf_addr = s->salami;
			ep->msgbuf_dma_addr = dma_addr;
			ep->msg_buf_size = bytebufsize;

			iowrite32(s->regdirection,
				  ep->registers + fpga_dma_bufno_reg);
		}

		s->left_of_salami -= bytebufsize;
		s->salami += bytebufsize;
	}
	return 0;
}

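/*
 * Each channel descriptor in @chandesc is 4 bytes, parsed as follows:
 * byte 0: bit 0 = write channel flag, bits 7:1 = channel number (low bits)
 * byte 1: bits 3:0 = channel number (high bits), bits 5:4 = format,
 *         bit 6 = allow partial, bit 7 = synchronous
 * byte 2: bits 4:0 = log2 of buffer size, bit 5 = supports nonempty,
 *         bit 6 = seekable, bit 7 = exclusive open
 * byte 3: bits 3:0 = log2 of the number of buffers
 */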
static int xilly_setupchannels(struct xilly_endpoint *ep,
			       unsigned char *chandesc,
			       int entries)
{
	struct device *dev = ep->dev;
	int i, entry, rc;
	struct xilly_channel *channel;
	int channelnum, bufnum, bufsize, format, is_writebuf;
	int bytebufsize;
	int synchronous, allowpartial, exclusive_open, seekable;
	int supports_nonempty;
	int msg_buf_done = 0;

	struct xilly_alloc_state rd_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_TO_DEVICE,
		.regdirection = 0,
	};

	struct xilly_alloc_state wr_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_FROM_DEVICE,
		.regdirection = 0x80000000,
	};

	channel = devm_kcalloc(dev, ep->num_channels,
			       sizeof(struct xilly_channel), GFP_KERNEL);
	if (!channel)
		return -ENOMEM;

	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
				    sizeof(struct xilly_channel *),
				    GFP_KERNEL);
	if (!ep->channels)
		return -ENOMEM;

	ep->channels[0] = NULL; /* Channel 0 is message buf. */

	/* Initialize all channels with defaults */

	for (i = 1; i <= ep->num_channels; i++) {
		channel->wr_buffers = NULL;
		channel->rd_buffers = NULL;
		channel->num_wr_buffers = 0;
		channel->num_rd_buffers = 0;
		channel->wr_fpga_buf_idx = -1;
		channel->wr_host_buf_idx = 0;
		channel->wr_host_buf_pos = 0;
		channel->wr_empty = 1;
		channel->wr_ready = 0;
		channel->wr_sleepy = 1;
		channel->rd_fpga_buf_idx = 0;
		channel->rd_host_buf_idx = 0;
		channel->rd_host_buf_pos = 0;
		channel->rd_full = 0;
		channel->wr_ref_count = 0;
		channel->rd_ref_count = 0;

		spin_lock_init(&channel->wr_spinlock);
		spin_lock_init(&channel->rd_spinlock);
		mutex_init(&channel->wr_mutex);
		mutex_init(&channel->rd_mutex);
		init_waitqueue_head(&channel->rd_wait);
		init_waitqueue_head(&channel->wr_wait);
		init_waitqueue_head(&channel->wr_ready_wait);

		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

		channel->endpoint = ep;
		channel->chan_num = i;

		channel->log2_element_size = 0;

		ep->channels[i] = channel++;
	}

	for (entry = 0; entry < entries; entry++, chandesc += 4) {
		struct xilly_buffer **buffers = NULL;

		is_writebuf = chandesc[0] & 0x01;
		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
		format = (chandesc[1] >> 4) & 0x03;
		allowpartial = (chandesc[1] >> 6) & 0x01;
		synchronous = (chandesc[1] >> 7) & 0x01;
		bufsize = 1 << (chandesc[2] & 0x1f);
		bufnum = 1 << (chandesc[3] & 0x0f);
		exclusive_open = (chandesc[2] >> 7) & 0x01;
		seekable = (chandesc[2] >> 6) & 0x01;
		supports_nonempty = (chandesc[2] >> 5) & 0x01;

		if ((channelnum > ep->num_channels) ||
		    ((channelnum == 0) && !is_writebuf)) {
			dev_err(ep->dev,
				"IDT requests channel out of range. Aborting.\n");
			return -ENODEV;
		}

		channel = ep->channels[channelnum]; /* NULL for msg channel */

		if (!is_writebuf || channelnum > 0) {
			channel->log2_element_size = ((format > 2) ?
						      2 : format);

			bytebufsize = bufsize *
				(1 << channel->log2_element_size);

			buffers = devm_kcalloc(dev, bufnum,
					       sizeof(struct xilly_buffer *),
					       GFP_KERNEL);
			if (!buffers)
				return -ENOMEM;
		} else {
			bytebufsize = bufsize << 2;
		}

		if (!is_writebuf) {
			channel->num_rd_buffers = bufnum;
			channel->rd_buf_size = bytebufsize;
			channel->rd_allow_partial = allowpartial;
			channel->rd_synchronous = synchronous;
			channel->rd_exclusive_open = exclusive_open;
			channel->seekable = seekable;

			channel->rd_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
						   bufnum, bytebufsize);
		} else if (channelnum > 0) {
			channel->num_wr_buffers = bufnum;
			channel->wr_buf_size = bytebufsize;

			channel->seekable = seekable;
			channel->wr_supports_nonempty = supports_nonempty;

			channel->wr_allow_partial = allowpartial;
			channel->wr_synchronous = synchronous;
			channel->wr_exclusive_open = exclusive_open;

			channel->wr_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
						   bufnum, bytebufsize);
		} else {
			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
						   bufnum, bytebufsize);
			msg_buf_done++;
		}

		if (rc)
			return -ENOMEM;
	}

	if (!msg_buf_done) {
		dev_err(ep->dev,
			"Corrupt IDT: No message buffer. Aborting.\n");
		return -ENODEV;
	}
	return 0;
}

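/*
 * The IDT is parsed as: a version byte, then a list of NUL-terminated
 * device name strings (the list ends with an empty string), followed by
 * the 4-byte channel descriptors. The trailing bytes hold a CRC-32,
 * which xilly_obtain_idt() verifies over the whole blob.
 */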
static int xilly_scan_idt(struct xilly_endpoint *endpoint,
			  struct xilly_idt_handle *idt_handle)
{
	int count = 0;
	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
	unsigned char *scan;
	int len;

	scan = idt + 1;
	idt_handle->names = scan;

	while ((scan <= end_of_idt) && *scan) {
		while ((scan <= end_of_idt) && *scan++)
			/* Do nothing, just scan through the string */;
		count++;
	}

	idt_handle->names_len = scan - idt_handle->names;

	scan++;

	if (scan > end_of_idt) {
		dev_err(endpoint->dev,
			"IDT device name list overflow. Aborting.\n");
		return -ENODEV;
	}
	idt_handle->chandesc = scan;

	len = endpoint->idtlen - (3 + ((int) (scan - idt)));

	if (len & 0x03) {
		dev_err(endpoint->dev,
			"Corrupt IDT device name list. Aborting.\n");
		return -ENODEV;
	}

	idt_handle->entries = len >> 2;
	endpoint->num_channels = count;

	return 0;
}

static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
	struct xilly_channel *channel;
	unsigned char *version;
	long t;

	channel = endpoint->channels[1]; /* This should be generated ad-hoc */

	channel->wr_sleepy = 1;

	iowrite32(1 |
		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
		  endpoint->registers + fpga_buf_ctrl_reg);

	t = wait_event_interruptible_timeout(channel->wr_wait,
					     (!channel->wr_sleepy),
					     XILLY_TIMEOUT);

	if (t <= 0) {
		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");

		if (endpoint->fatal_error)
			return -EIO;

		return -ENODEV;
	}

	dma_sync_single_for_cpu(channel->endpoint->dev,
				channel->wr_buffers[0]->dma_addr,
				channel->wr_buf_size,
				DMA_FROM_DEVICE);

	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
		dev_err(endpoint->dev,
			"IDT length mismatch (%d != %d). Aborting.\n",
			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
		return -ENODEV;
	}

	if (crc32_le(~0, channel->wr_buffers[0]->addr,
		     endpoint->idtlen+1) != 0) {
		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
		return -ENODEV;
	}

	version = channel->wr_buffers[0]->addr;

	/* Check version number. Reject anything above 0x82. */
	if (*version > 0x82) {
		dev_err(endpoint->dev,
			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
			*version);
		return -ENODEV;
	}

	return 0;
}

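/*
 * xillybus_read() works on the principle of consuming whatever the RAM
 * buffers already hold, and only then deciding whether to nudge the FPGA:
 * either by setting an offset limit, so a partially filled buffer is sent
 * early, or, when desperate, by requesting a flush of everything it has.
 * Sleeping is bounded by a deadline when partial reads are allowed, and
 * is indefinite when they aren't.
 */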
static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	int no_time_left = 0;
	long deadline, left_to_sleep;
	struct xilly_channel *channel = filp->private_data;

	int empty, reached_eof, exhausted, ready;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int waiting_bufidx;

	if (channel->endpoint->fatal_error)
		return -EIO;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&channel->wr_mutex);
	if (rc)
		return rc;

	while (1) { /* Note that we may drop mutex within this loop */
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->wr_spinlock, flags);

		empty = channel->wr_empty;
		ready = !empty || channel->wr_ready;

		if (!empty) {
			bufidx = channel->wr_host_buf_idx;
			bufpos = channel->wr_host_buf_pos;
			howmany = ((channel->wr_buffers[bufidx]->end_offset
				    + 1) << channel->log2_element_size)
				- bufpos;

			/* Update wr_host_* to its post-operation state */
			if (howmany > bytes_to_do) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->wr_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				channel->wr_host_buf_pos = 0;

				if (bufidx == channel->wr_fpga_buf_idx) {
					channel->wr_empty = 1;
					channel->wr_sleepy = 1;
					channel->wr_ready = 0;
				}

				if (bufidx >= (channel->num_wr_buffers - 1))
					channel->wr_host_buf_idx = 0;
				else
					channel->wr_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * empty = empty before change
		 * exhausted = empty after possible change
		 */

		reached_eof = channel->wr_empty &&
			(channel->wr_host_buf_idx == channel->wr_eof);
		channel->wr_hangup = reached_eof;
		exhausted = channel->wr_empty;
		waiting_bufidx = channel->wr_host_buf_idx;

		spin_unlock_irqrestore(&channel->wr_spinlock, flags);

		if (!empty) { /* Go on, now without the spinlock */

			if (bufpos == 0) /* Position zero means it's virgin */
				dma_sync_single_for_cpu(channel->endpoint->dev,
							channel->wr_buffers[bufidx]->dma_addr,
							channel->wr_buf_size,
							DMA_FROM_DEVICE);

			if (copy_to_user(
				    userbuf,
				    channel->wr_buffers[bufidx]->addr
				    + bufpos, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				dma_sync_single_for_device(channel->endpoint->dev,
							   channel->wr_buffers[bufidx]->dma_addr,
							   channel->wr_buf_size,
							   DMA_FROM_DEVICE);

				/*
				 * Tell FPGA the buffer is done with. It's an
				 * atomic operation to the FPGA, so what
				 * happens with other channels doesn't matter,
				 * and this particular channel is protected
				 * by the channel-specific mutex.
				 */

				iowrite32(1 | (channel->chan_num << 1) |
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);
			}

			if (rc) {
				mutex_unlock(&channel->wr_mutex);
				return rc;
			}
		}

		/* This includes a zero-count return = EOF */
		if ((bytes_done >= count) || reached_eof)
			break;

		if (!exhausted)
			continue; /* More in RAM buffer(s)? Just go on. */

		if ((bytes_done > 0) &&
		    (no_time_left ||
		     (channel->wr_synchronous && channel->wr_allow_partial)))
			break;

		/*
		 * Nonblocking read: The "ready" flag tells us that the FPGA
		 * has data to send. In non-blocking mode, if it isn't on,
		 * just return. But if there is, we jump directly to the point
		 * where we ask for the FPGA to send all it has, and wait
		 * until that data arrives. So in a sense, we *do* block in
		 * nonblocking mode, but only for a very short time.
		 */

		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
			if (bytes_done > 0)
				break;

			if (ready)
				goto desperate;

			rc = -EAGAIN;
			break;
		}

		if (!no_time_left || (bytes_done > 0)) {
			/*
			 * Note that in case of an element-misaligned read
			 * request, offsetlimit will include the last element,
			 * which will be partially read from.
			 */
			int offsetlimit = ((count - bytes_done) - 1) >>
				channel->log2_element_size;
			int buf_elements = channel->wr_buf_size >>
				channel->log2_element_size;

			/*
			 * In synchronous mode, always send an offset limit.
			 * Just don't send a value too big.
			 */

			if (channel->wr_synchronous) {
				/* Don't request more than one buffer */
				if (channel->wr_allow_partial &&
				    (offsetlimit >= buf_elements))
					offsetlimit = buf_elements - 1;

				/* Don't request more than all buffers */
				if (!channel->wr_allow_partial &&
				    (offsetlimit >=
				     (buf_elements * channel->num_wr_buffers)))
					offsetlimit = buf_elements *
						channel->num_wr_buffers - 1;
			}

			/*
			 * In asynchronous mode, force early flush of a buffer
			 * only if that will allow returning a full count. The
			 * "offsetlimit < ( ... )" rather than "<=" excludes
			 * requesting a full buffer, which would obviously
			 * cause a buffer transmission anyhow.
			 */

			if (channel->wr_synchronous ||
			    (offsetlimit < (buf_elements - 1))) {
				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(offsetlimit,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32(1 | (channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = offset limit */
					  (waiting_bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);
			}
		}

		/*
		 * If partial completion is disallowed, there is no point in
		 * timeout sleeping. Neither if no_time_left is set and
		 * there's no data.
		 */

		if (!channel->wr_allow_partial ||
		    (no_time_left && (bytes_done == 0))) {
			/*
			 * This do-loop will run more than once if another
			 * thread reasserted wr_sleepy before we got the mutex
			 * back, so we try again.
			 */

			do {
				mutex_unlock(&channel->wr_mutex);

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					goto interrupted;

				if (mutex_lock_interruptible(
					    &channel->wr_mutex))
					goto interrupted;
			} while (channel->wr_sleepy);

			continue;

interrupted: /* Mutex is not held if we got here */
			if (channel->endpoint->fatal_error)
				return -EIO;
			if (bytes_done)
				return bytes_done;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN; /* Don't admit snoozing */
			return -EINTR;
		}

		left_to_sleep = deadline - ((long) jiffies);

		/*
		 * If our time is out, skip the waiting. We may miss wr_sleepy
		 * being deasserted but hey, almost missing the train is like
		 * missing it.
		 */

		if (left_to_sleep > 0) {
			left_to_sleep =
				wait_event_interruptible_timeout(
					channel->wr_wait,
					(!channel->wr_sleepy),
					left_to_sleep);

			if (left_to_sleep > 0) /* wr_sleepy deasserted */
				continue;

			if (left_to_sleep < 0) { /* Interrupt */
				mutex_unlock(&channel->wr_mutex);
				if (channel->endpoint->fatal_error)
					return -EIO;
				if (bytes_done)
					return bytes_done;
				return -EINTR;
			}
		}

desperate:
		no_time_left = 1; /* We're out of sleeping time. Desperate! */

		if (bytes_done == 0) {
			/*
			 * Reaching here means that we allow partial return,
			 * that we've run out of time, and that we have
			 * nothing to return.
			 * So tell the FPGA to send anything it has or gets.
			 */

			iowrite32(1 | (channel->chan_num << 1) |
				  (3 << 24) |  /* Opcode 3, flush it all! */
				  (waiting_bufidx << 12),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		/*
		 * Reaching here means that we *do* have data in the buffer,
		 * but the "partial" flag disallows returning less than
		 * required. And we don't have as much. So loop again,
		 * which is likely to end up blocking indefinitely until
		 * enough data has arrived.
		 */
	}

	mutex_unlock(&channel->wr_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	return bytes_done;
}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 */

static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
	int rc;
	unsigned long flags;

	int end_offset_plus1;
	int bufidx, bufidx_minus1;
	int i;
	int empty;
	int new_rd_host_buf_pos;

	if (channel->endpoint->fatal_error)
		return -EIO;
	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	/*
	 * Don't flush a closed channel. This can happen when the work queued
	 * autoflush thread fires off after the file has closed. This is not
	 * an error, just something to dismiss.
	 */

	if (!channel->rd_ref_count)
		goto done;

	bufidx = channel->rd_host_buf_idx;

	bufidx_minus1 = (bufidx == 0) ?
		channel->num_rd_buffers - 1 :
		bufidx - 1;

	end_offset_plus1 = channel->rd_host_buf_pos >>
		channel->log2_element_size;

	new_rd_host_buf_pos = channel->rd_host_buf_pos -
		(end_offset_plus1 << channel->log2_element_size);

	/* Submit the current buffer if it's nonempty */
	if (end_offset_plus1) {
		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
			(end_offset_plus1 << channel->log2_element_size);

		/* Copy unflushed data, so we can put it in next buffer */
		for (i = 0; i < new_rd_host_buf_pos; i++)
			channel->rd_leftovers[i] = *tail++;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		/* Autoflush only if a single buffer is occupied */

		if ((timeout < 0) &&
		    (channel->rd_full ||
		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
			/*
			 * A new work item may be queued by the ISR exactly
			 * now, since the execution of a work item allows the
			 * queuing of a new one while it's running.
			 */
			goto done;
		}

		/* The 4th element is never needed for data, so it's a flag */
		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);

		/* Set up rd_full to reflect a certain moment's state */

		if (bufidx == channel->rd_fpga_buf_idx)
			channel->rd_full = 1;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (bufidx >= (channel->num_rd_buffers - 1))
			channel->rd_host_buf_idx = 0;
		else
			channel->rd_host_buf_idx++;

		dma_sync_single_for_device(channel->endpoint->dev,
					   channel->rd_buffers[bufidx]->dma_addr,
					   channel->rd_buf_size,
					   DMA_TO_DEVICE);

		mutex_lock(&channel->endpoint->register_mutex);

		iowrite32(end_offset_plus1 - 1,
			  channel->endpoint->registers + fpga_buf_offset_reg);
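		/*
		 * The offset write above must precede the submit command
		 * below; holding register_mutex makes this two-write
		 * sequence atomic with respect to other register users.
		 */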

		iowrite32((channel->chan_num << 1) | /* Channel ID */
			  (2 << 24) |  /* Opcode 2, submit buffer */
			  (bufidx << 12),
			  channel->endpoint->registers + fpga_buf_ctrl_reg);

		mutex_unlock(&channel->endpoint->register_mutex);
	} else if (bufidx == 0) {
		bufidx = channel->num_rd_buffers - 1;
	} else {
		bufidx--;
	}

	channel->rd_host_buf_pos = new_rd_host_buf_pos;

	if (timeout < 0)
		goto done; /* Autoflush */

	/*
	 * bufidx is now the last buffer written to (or equal to
	 * rd_fpga_buf_idx if buffer was never written to), and
	 * channel->rd_host_buf_idx the one after it.
	 *
	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
	 */

	while (1) { /* Loop waiting for draining of buffers */
		spin_lock_irqsave(&channel->rd_spinlock, flags);

		if (bufidx != channel->rd_fpga_buf_idx)
			channel->rd_full = 1; /*
					       * Not really full,
					       * but needs waiting.
					       */

		empty = !channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (empty)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, user should not be surprised if open() for write
		 * sleeps.
		 */
		if (timeout == 0)
			wait_event_interruptible(channel->rd_wait,
						 (!channel->rd_full));

		else if (wait_event_interruptible_timeout(
				 channel->rd_wait,
				 (!channel->rd_full),
				 timeout) == 0) {
			dev_warn(channel->endpoint->dev,
				 "Timed out while flushing. Output data may be lost.\n");

			rc = -ETIMEDOUT;
			break;
		}

		if (channel->rd_full) {
			rc = -EINTR;
			break;
		}
	}

done:
	mutex_unlock(&channel->rd_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return rc;
}

static int xillybus_flush(struct file *filp, fl_owner_t id)
{
	if (!(filp->f_mode & FMODE_WRITE))
		return 0;

	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
}

static void xillybus_autoflush(struct work_struct *work)
{
	struct delayed_work *workitem = to_delayed_work(work);
	struct xilly_channel *channel = container_of(
		workitem, struct xilly_channel, rd_workitem);
	int rc;

	rc = xillybus_myflush(channel, -1);
	if (rc == -EINTR)
		dev_warn(channel->endpoint->dev,
			 "Autoflush failed because work queue thread got a signal.\n");
	else if (rc)
		dev_err(channel->endpoint->dev,
			"Autoflush failed under weird circumstances.\n");
}

static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	struct xilly_channel *channel = filp->private_data;

	int full, exhausted;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int end_offset_plus1 = 0;

	if (channel->endpoint->fatal_error)
		return -EIO;

	rc = mutex_lock_interruptible(&channel->rd_mutex);
	if (rc)
		return rc;

	while (1) {
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		full = channel->rd_full;

		if (!full) {
			bufidx = channel->rd_host_buf_idx;
			bufpos = channel->rd_host_buf_pos;
			howmany = channel->rd_buf_size - bufpos;

			/*
			 * Update rd_host_* to its state after this operation.
			 * count=0 means committing the buffer immediately,
			 * which is like flushing, but not necessarily block.
			 */

			if ((howmany > bytes_to_do) &&
			    (count ||
			     ((bufpos >> channel->log2_element_size) == 0))) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->rd_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				if (count) {
					end_offset_plus1 =
						channel->rd_buf_size >>
						channel->log2_element_size;
					channel->rd_host_buf_pos = 0;
				} else {
					unsigned char *tail;
					int i;

					howmany = 0;

					end_offset_plus1 = bufpos >>
						channel->log2_element_size;

					channel->rd_host_buf_pos -=
						end_offset_plus1 <<
						channel->log2_element_size;

					tail = channel->
						rd_buffers[bufidx]->addr +
						(end_offset_plus1 <<
						 channel->log2_element_size);

					for (i = 0;
					     i < channel->rd_host_buf_pos;
					     i++)
						channel->rd_leftovers[i] =
							*tail++;
				}

				if (bufidx == channel->rd_fpga_buf_idx)
					channel->rd_full = 1;

				if (bufidx >= (channel->num_rd_buffers - 1))
					channel->rd_host_buf_idx = 0;
				else
					channel->rd_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * full = full before change
		 * exhausted = full after possible change
		 */

		exhausted = channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (!full) { /* Go on, now without the spinlock */
			unsigned char *head =
				channel->rd_buffers[bufidx]->addr;
			int i;

			if ((bufpos == 0) || /* Zero means it's virgin */
			    (channel->rd_leftovers[3] != 0)) {
				dma_sync_single_for_cpu(channel->endpoint->dev,
							channel->rd_buffers[bufidx]->dma_addr,
							channel->rd_buf_size,
							DMA_TO_DEVICE);

				/* Virgin, but leftovers are due */
				for (i = 0; i < bufpos; i++)
					*head++ = channel->rd_leftovers[i];

				channel->rd_leftovers[3] = 0; /* Clear flag */
			}

			if (copy_from_user(
				    channel->rd_buffers[bufidx]->addr + bufpos,
				    userbuf, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				dma_sync_single_for_device(channel->endpoint->dev,
							   channel->rd_buffers[bufidx]->dma_addr,
							   channel->rd_buf_size,
							   DMA_TO_DEVICE);

				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(end_offset_plus1 - 1,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32((channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = submit buffer */
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);

				channel->rd_leftovers[3] =
					(channel->rd_host_buf_pos != 0);
			}

			if (rc) {
				mutex_unlock(&channel->rd_mutex);

				if (channel->endpoint->fatal_error)
					return -EIO;

				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);

				return rc;
			}
		}

		if (bytes_done >= count)
			break;

		if (!exhausted)
			continue; /* If there's more space, just go on */

		if ((bytes_done > 0) && channel->rd_allow_partial)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, user should not be surprised if open() for write
		 * sleeps.
		 */

		if (filp->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			break;
		}

		if (wait_event_interruptible(channel->rd_wait,
					     (!channel->rd_full))) {
			mutex_unlock(&channel->rd_mutex);

			if (channel->endpoint->fatal_error)
				return -EIO;

			if (bytes_done)
				return bytes_done;
			return -EINTR;
		}
	}

	mutex_unlock(&channel->rd_mutex);

	if (!channel->rd_synchronous)
		queue_delayed_work(xillybus_wq,
				   &channel->rd_workitem,
				   XILLY_RX_TIMEOUT);

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (rc)
		return rc;

	if ((channel->rd_synchronous) && (bytes_done > 0)) {
		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */

		if (rc && (rc != -EINTR))
			return rc;
	}

	return bytes_done;
}

static int xillybus_open(struct inode *inode, struct file *filp)
{
	int rc;
	unsigned long flags;
	struct xilly_endpoint *endpoint;
	struct xilly_channel *channel;
	int index;

	rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
	if (rc)
		return rc;

	if (endpoint->fatal_error)
		return -EIO;

	channel = endpoint->channels[1 + index];
	filp->private_data = channel;

	/*
	 * It gets complicated because:
	 * 1. We don't want to take a mutex we don't have to
	 * 2. We don't want to open one direction if the other will fail.
	 */

	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->wr_synchronous || !channel->wr_allow_partial ||
	     !channel->wr_supports_nonempty)) {
		dev_err(endpoint->dev,
			"open() failed: O_NONBLOCK not allowed for read on this device\n");
		return -ENODEV;
	}

	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
		dev_err(endpoint->dev,
			"open() failed: O_NONBLOCK not allowed for write on this device\n");
		return -ENODEV;
	}

	/*
	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
	 * This shouldn't occur normally, since multiple open of the same
	 * file descriptor is almost always prohibited anyhow
	 * (*_exclusive_open is normally set in real-life systems).
	 */

	if (filp->f_mode & FMODE_READ) {
		rc = mutex_lock_interruptible(&channel->wr_mutex);
		if (rc)
			return rc;
	}

	if (filp->f_mode & FMODE_WRITE) {
		rc = mutex_lock_interruptible(&channel->rd_mutex);
		if (rc)
			goto unlock_wr;
	}

	if ((filp->f_mode & FMODE_READ) &&
	    (channel->wr_ref_count != 0) &&
	    (channel->wr_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if ((filp->f_mode & FMODE_WRITE) &&
	    (channel->rd_ref_count != 0) &&
	    (channel->rd_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if (filp->f_mode & FMODE_READ) {
		if (channel->wr_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->wr_spinlock, flags);
			channel->wr_host_buf_idx = 0;
			channel->wr_host_buf_pos = 0;
			channel->wr_fpga_buf_idx = -1;
			channel->wr_empty = 1;
			channel->wr_ready = 0;
			channel->wr_sleepy = 1;
			channel->wr_eof = -1;
			channel->wr_hangup = 0;

			spin_unlock_irqrestore(&channel->wr_spinlock, flags);

			iowrite32(1 | (channel->chan_num << 1) |
				  (4 << 24) |  /* Opcode 4, open channel */
				  ((channel->wr_synchronous & 1) << 23),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->wr_ref_count++;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (channel->rd_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->rd_spinlock, flags);
			channel->rd_host_buf_idx = 0;
			channel->rd_host_buf_pos = 0;
			channel->rd_leftovers[3] = 0; /* No leftovers. */
			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
			channel->rd_full = 0;

			spin_unlock_irqrestore(&channel->rd_spinlock, flags);

			iowrite32((channel->chan_num << 1) |
				  (4 << 24),  /* Opcode 4, open channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->rd_ref_count++;
	}

unlock:
	if (filp->f_mode & FMODE_WRITE)
		mutex_unlock(&channel->rd_mutex);
unlock_wr:
	if (filp->f_mode & FMODE_READ)
		mutex_unlock(&channel->wr_mutex);

	if (!rc && (!channel->seekable))
		return nonseekable_open(inode, filp);

	return rc;
}

static int xillybus_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct xilly_channel *channel = filp->private_data;

	int buf_idx;
	int eof;

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (filp->f_mode & FMODE_WRITE) {
		mutex_lock(&channel->rd_mutex);

		channel->rd_ref_count--;

		if (channel->rd_ref_count == 0) {
			/*
			 * We rely on the kernel calling flush()
			 * before we get here.
			 */

			iowrite32((channel->chan_num << 1) | /* Channel ID */
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}
		mutex_unlock(&channel->rd_mutex);
	}

	if (filp->f_mode & FMODE_READ) {
		mutex_lock(&channel->wr_mutex);

		channel->wr_ref_count--;

		if (channel->wr_ref_count == 0) {
			iowrite32(1 | (channel->chan_num << 1) |
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);

			/*
			 * This is crazily cautious: We not only make sure
			 * that we got an EOF (be it because we closed the
			 * channel or because of a user's EOF), but also
			 * verify that it's one beyond the last buffer that
			 * arrived, so no leftover buffers are pending before
			 * wrapping up (which can only happen in asynchronous
			 * channels, BTW).
			 */

			while (1) {
				spin_lock_irqsave(&channel->wr_spinlock,
						  flags);
				buf_idx = channel->wr_fpga_buf_idx;
				eof = channel->wr_eof;
				channel->wr_sleepy = 1;
				spin_unlock_irqrestore(&channel->wr_spinlock,
						       flags);

				/*
				 * Check if eof points at the buffer after
				 * the last one the FPGA submitted. Note that
				 * no EOF is marked by a negative eof.
				 */

				buf_idx++;
				if (buf_idx == channel->num_wr_buffers)
					buf_idx = 0;

				if (buf_idx == eof)
					break;

				/*
				 * Steal extra 100 ms if awakened by
				 * interrupt. This is a simple workaround
				 * for an interrupt pending when entering,
				 * which would otherwise result in declaring
				 * the hardware non-responsive.
				 */

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					msleep(100);

				if (channel->wr_sleepy) {
					mutex_unlock(&channel->wr_mutex);
					dev_warn(channel->endpoint->dev,
						 "Hardware failed to respond to close command, therefore left in messy state.\n");
					return -EINTR;
				}
			}
		}

		mutex_unlock(&channel->wr_mutex);
	}

	return 0;
}

static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{
	struct xilly_channel *channel = filp->private_data;
	loff_t pos = filp->f_pos;
	int rc = 0;

	/*
	 * Take both mutexes not allowing interrupts, since it seems like
	 * common applications don't expect an -EINTR here. Besides, multiple
	 * access to a single file descriptor on seekable devices is a mess
	 * anyhow.
	 */

	if (channel->endpoint->fatal_error)
		return -EIO;

	mutex_lock(&channel->wr_mutex);
	mutex_lock(&channel->rd_mutex);

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos += offset;
		break;
	case SEEK_END:
		pos = offset; /* Going to the end => to the beginning */
		break;
	default:
		rc = -EINVAL;
		goto end;
	}

	/* In any case, we must finish on an element boundary */
	if (pos & ((1 << channel->log2_element_size) - 1)) {
		rc = -EINVAL;
		goto end;
	}

	mutex_lock(&channel->endpoint->register_mutex);

	iowrite32(pos >> channel->log2_element_size,
		  channel->endpoint->registers + fpga_buf_offset_reg);

	iowrite32((channel->chan_num << 1) |
		  (6 << 24),  /* Opcode 6, set address */
		  channel->endpoint->registers + fpga_buf_ctrl_reg);

	mutex_unlock(&channel->endpoint->register_mutex);

end:
	mutex_unlock(&channel->rd_mutex);
	mutex_unlock(&channel->wr_mutex);

	if (rc) /* Return error after releasing mutexes */
		return rc;

	filp->f_pos = pos;

	/*
	 * Since seekable devices are allowed only when the channel is
	 * synchronous, we assume that there is no data pending in either
	 * direction (which holds true as long as no concurrent access on the
	 * file descriptor takes place).
	 * The only thing we may need to throw away is leftovers from partial
	 * write() flush.
	 */

	channel->rd_leftovers[3] = 0;

	return pos;
}

static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{
	struct xilly_channel *channel = filp->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(filp, &channel->endpoint->ep_wait, wait);

	/*
	 * poll() won't play ball regarding read() channels which
	 * aren't asynchronous and support the nonempty message. Allowing
	 * that would create situations where data has been delivered at
	 * the FPGA, with users expecting select() to wake up, which it
	 * may not.
	 */

	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
		poll_wait(filp, &channel->wr_wait, wait);
		poll_wait(filp, &channel->wr_ready_wait, wait);

		spin_lock_irqsave(&channel->wr_spinlock, flags);
		if (!channel->wr_empty || channel->wr_ready)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (channel->wr_hangup)
			/*
			 * Not EPOLLHUP, because its behavior is murky,
			 * and EPOLLIN does what we want: Wake up
			 * the read file descriptor so it sees EOF.
			 */
			mask |= EPOLLIN | EPOLLRDNORM;
		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
	}

	/*
	 * If partial data write is disallowed on a write() channel,
	 * it's pointless to ever signal OK to write, because it could
	 * block despite some space being available.
	 */

	if (channel->rd_allow_partial) {
		poll_wait(filp, &channel->rd_wait, wait);

		spin_lock_irqsave(&channel->rd_spinlock, flags);
		if (!channel->rd_full)
			mask |= EPOLLOUT | EPOLLWRNORM;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
	}

	if (channel->endpoint->fatal_error)
		mask |= EPOLLERR;

	return mask;
}

static const struct file_operations xillybus_fops = {
	.owner      = THIS_MODULE,
	.read       = xillybus_read,
	.write      = xillybus_write,
	.open       = xillybus_open,
	.flush      = xillybus_flush,
	.release    = xillybus_release,
	.llseek     = xillybus_llseek,
	.poll       = xillybus_poll,
};

struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
{
	struct xilly_endpoint *endpoint;

	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
	if (!endpoint)
		return NULL;

	endpoint->dev = dev;
	endpoint->msg_counter = 0x0b;
	endpoint->failed_messages = 0;
	endpoint->fatal_error = 0;

	init_waitqueue_head(&endpoint->ep_wait);
	mutex_init(&endpoint->register_mutex);

	return endpoint;
}
EXPORT_SYMBOL(xillybus_init_endpoint);

static int xilly_quiesce(struct xilly_endpoint *endpoint)
{
	long t;

	endpoint->idtlen = -1;

	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	t = wait_event_interruptible_timeout(endpoint->ep_wait,
					     (endpoint->idtlen >= 0),
					     XILLY_TIMEOUT);
	if (t <= 0) {
		dev_err(endpoint->dev,
			"Failed to quiesce the device on exit.\n");
		return -ENODEV;
	}
	return 0;
}

int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
{
	int rc;
	long t;

	void *bootstrap_resources;
	int idtbuffersize = (1 << PAGE_SHIFT);
	struct device *dev = endpoint->dev;

	/*
	 * The bogus IDT is used during bootstrap for allocating the initial
	 * message buffer, and then the message buffer and space for the IDT
	 * itself. The initial message buffer is of a single page's size, but
	 * it's soon replaced with a more modest one (and memory is freed).
	 */

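	/*
	 * Decoded per xilly_setupchannels(): entry 0 (bytes 1, 224,
	 * PAGE_SHIFT-2, 0) describes the message buffer: write flag set,
	 * channel 0, a single page-sized buffer. Entry 1 (bytes 3, 192,
	 * PAGE_SHIFT, 0) describes a single page-sized write buffer on
	 * channel 1 for the IDT, doubled below until the IDT fits.
	 */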
	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
				       3, 192, PAGE_SHIFT, 0 };
	struct xilly_idt_handle idt_handle;

	/*
	 * Writing the value 0x00000001 to the Endianness register signals
	 * which endianness this processor is using, so the FPGA can swap
	 * words as necessary.
	 */

	iowrite32(1, endpoint->registers + fpga_endian_reg);

	/* Bootstrap phase I: Allocate temporary message buffer */

	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!bootstrap_resources)
		return -ENOMEM;

	endpoint->num_channels = 0;

	rc = xilly_setupchannels(endpoint, bogus_idt, 1);
	if (rc)
		return rc;

	/* Clear the message subsystem (and counter in particular) */
	iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);

	endpoint->idtlen = -1;

	/*
	 * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
	 * buffer size.
	 */
	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	t = wait_event_interruptible_timeout(endpoint->ep_wait,
					     (endpoint->idtlen >= 0),
					     XILLY_TIMEOUT);
	if (t <= 0) {
		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
		return -ENODEV;
	}

	/* Enable DMA */
	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
		  endpoint->registers + fpga_dma_control_reg);

	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
	while (endpoint->idtlen >= idtbuffersize) {
		idtbuffersize *= 2;
		bogus_idt[6]++;
	}

	endpoint->num_channels = 1;

	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
	if (rc)
		goto failed_idt;

	rc = xilly_obtain_idt(endpoint);
	if (rc)
		goto failed_idt;

	rc = xilly_scan_idt(endpoint, &idt_handle);
	if (rc)
		goto failed_idt;

	devres_close_group(dev, bootstrap_resources);

	/* Bootstrap phase III: Allocate buffers according to IDT */

	rc = xilly_setupchannels(endpoint,
				 idt_handle.chandesc,
				 idt_handle.entries);
	if (rc)
		goto failed_idt;

	rc = xillybus_init_chrdev(dev, &xillybus_fops,
				  endpoint->owner, endpoint,
				  idt_handle.names,
				  idt_handle.names_len,
				  endpoint->num_channels,
				  xillyname, false);

	if (rc)
		goto failed_idt;

	devres_release_group(dev, bootstrap_resources);

	return 0;

failed_idt:
	xilly_quiesce(endpoint);
	flush_workqueue(xillybus_wq);

	return rc;
}
EXPORT_SYMBOL(xillybus_endpoint_discovery);

void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{
	xillybus_cleanup_chrdev(endpoint, endpoint->dev);

	xilly_quiesce(endpoint);

	/*
	 * Flushing is done upon endpoint release to prevent access to memory
	 * just about to be released. This makes the quiesce complete.
	 */
	flush_workqueue(xillybus_wq);
}
EXPORT_SYMBOL(xillybus_endpoint_remove);

static int __init xillybus_init(void)
{
	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
	if (!xillybus_wq)
		return -ENOMEM;

	return 0;
}

static void __exit xillybus_exit(void)
{
	/* flush_workqueue() was called for each endpoint released */
	destroy_workqueue(xillybus_wq);
}

module_init(xillybus_init);
module_exit(xillybus_exit);