/* Path: drivers/media/video/cx18/cx18-queue.c */
/*1* cx18 buffer queues2*3* Derived from ivtv-queue.c4*5* Copyright (C) 2007 Hans Verkuil <[email protected]>6* Copyright (C) 2008 Andy Walls <[email protected]>7*8* This program is free software; you can redistribute it and/or modify9* it under the terms of the GNU General Public License as published by10* the Free Software Foundation; either version 2 of the License, or11* (at your option) any later version.12*13* This program is distributed in the hope that it will be useful,14* but WITHOUT ANY WARRANTY; without even the implied warranty of15* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the16* GNU General Public License for more details.17*18* You should have received a copy of the GNU General Public License19* along with this program; if not, write to the Free Software20* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA21* 02111-1307 USA22*/2324#include "cx18-driver.h"25#include "cx18-queue.h"26#include "cx18-streams.h"27#include "cx18-scb.h"28#include "cx18-io.h"2930void cx18_buf_swap(struct cx18_buffer *buf)31{32int i;3334for (i = 0; i < buf->bytesused; i += 4)35swab32s((u32 *)(buf->buf + i));36}3738void _cx18_mdl_swap(struct cx18_mdl *mdl)39{40struct cx18_buffer *buf;4142list_for_each_entry(buf, &mdl->buf_list, list) {43if (buf->bytesused == 0)44break;45cx18_buf_swap(buf);46}47}4849void cx18_queue_init(struct cx18_queue *q)50{51INIT_LIST_HEAD(&q->list);52atomic_set(&q->depth, 0);53q->bytesused = 0;54}5556struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,57struct cx18_queue *q, int to_front)58{59/* clear the mdl if it is not to be enqueued to the full queue */60if (q != &s->q_full) {61mdl->bytesused = 0;62mdl->readpos = 0;63mdl->m_flags = 0;64mdl->skipped = 0;65mdl->curr_buf = NULL;66}6768/* q_busy is restricted to a max buffer count imposed by firmware */69if (q == &s->q_busy &&70atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)71q = &s->q_free;7273spin_lock(&q->lock);7475if 
(to_front)76list_add(&mdl->list, &q->list); /* LIFO */77else78list_add_tail(&mdl->list, &q->list); /* FIFO */79q->bytesused += mdl->bytesused - mdl->readpos;80atomic_inc(&q->depth);8182spin_unlock(&q->lock);83return q;84}8586struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)87{88struct cx18_mdl *mdl = NULL;8990spin_lock(&q->lock);91if (!list_empty(&q->list)) {92mdl = list_first_entry(&q->list, struct cx18_mdl, list);93list_del_init(&mdl->list);94q->bytesused -= mdl->bytesused - mdl->readpos;95mdl->skipped = 0;96atomic_dec(&q->depth);97}98spin_unlock(&q->lock);99return mdl;100}101102static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,103struct cx18_mdl *mdl)104{105struct cx18_buffer *buf;106u32 buf_size = s->buf_size;107u32 bytesused = mdl->bytesused;108109list_for_each_entry(buf, &mdl->buf_list, list) {110buf->readpos = 0;111if (bytesused >= buf_size) {112buf->bytesused = buf_size;113bytesused -= buf_size;114} else {115buf->bytesused = bytesused;116bytesused = 0;117}118cx18_buf_sync_for_cpu(s, buf);119}120}121122static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,123struct cx18_mdl *mdl)124{125struct cx18_buffer *buf;126127if (list_is_singular(&mdl->buf_list)) {128buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,129list);130buf->bytesused = mdl->bytesused;131buf->readpos = 0;132cx18_buf_sync_for_cpu(s, buf);133} else {134_cx18_mdl_update_bufs_for_cpu(s, mdl);135}136}137138struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,139u32 bytesused)140{141struct cx18 *cx = s->cx;142struct cx18_mdl *mdl;143struct cx18_mdl *tmp;144struct cx18_mdl *ret = NULL;145LIST_HEAD(sweep_up);146147/*148* We don't have to acquire multiple q locks here, because we are149* serialized by the single threaded work handler.150* MDLs from the firmware will thus remain in order as151* they are moved from q_busy to q_full or to the dvb ring buffer.152*/153spin_lock(&s->q_busy.lock);154list_for_each_entry_safe(mdl, tmp, 
&s->q_busy.list, list) {155/*156* We should find what the firmware told us is done,157* right at the front of the queue. If we don't, we likely have158* missed an mdl done message from the firmware.159* Once we skip an mdl repeatedly, relative to the size of160* q_busy, we have high confidence we've missed it.161*/162if (mdl->id != id) {163mdl->skipped++;164if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {165/* mdl must have fallen out of rotation */166CX18_WARN("Skipped %s, MDL %d, %d "167"times - it must have dropped out of "168"rotation\n", s->name, mdl->id,169mdl->skipped);170/* Sweep it up to put it back into rotation */171list_move_tail(&mdl->list, &sweep_up);172atomic_dec(&s->q_busy.depth);173}174continue;175}176/*177* We pull the desired mdl off of the queue here. Something178* will have to put it back on a queue later.179*/180list_del_init(&mdl->list);181atomic_dec(&s->q_busy.depth);182ret = mdl;183break;184}185spin_unlock(&s->q_busy.lock);186187/*188* We found the mdl for which we were looking. 
Get it ready for189* the caller to put on q_full or in the dvb ring buffer.190*/191if (ret != NULL) {192ret->bytesused = bytesused;193ret->skipped = 0;194/* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */195cx18_mdl_update_bufs_for_cpu(s, ret);196if (s->type != CX18_ENC_STREAM_TYPE_TS)197set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);198}199200/* Put any mdls the firmware is ignoring back into normal rotation */201list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {202list_del_init(&mdl->list);203cx18_enqueue(s, mdl, &s->q_free);204}205return ret;206}207208/* Move all mdls of a queue, while flushing the mdl */209static void cx18_queue_flush(struct cx18_stream *s,210struct cx18_queue *q_src, struct cx18_queue *q_dst)211{212struct cx18_mdl *mdl;213214/* It only makes sense to flush to q_free or q_idle */215if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)216return;217218spin_lock(&q_src->lock);219spin_lock(&q_dst->lock);220while (!list_empty(&q_src->list)) {221mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);222list_move_tail(&mdl->list, &q_dst->list);223mdl->bytesused = 0;224mdl->readpos = 0;225mdl->m_flags = 0;226mdl->skipped = 0;227mdl->curr_buf = NULL;228atomic_inc(&q_dst->depth);229}230cx18_queue_init(q_src);231spin_unlock(&q_src->lock);232spin_unlock(&q_dst->lock);233}234235void cx18_flush_queues(struct cx18_stream *s)236{237cx18_queue_flush(s, &s->q_busy, &s->q_free);238cx18_queue_flush(s, &s->q_full, &s->q_free);239}240241/*242* Note, s->buf_pool is not protected by a lock,243* the stream better not have *anything* going on when calling this244*/245void cx18_unload_queues(struct cx18_stream *s)246{247struct cx18_queue *q_idle = &s->q_idle;248struct cx18_mdl *mdl;249struct cx18_buffer *buf;250251/* Move all MDLS to q_idle */252cx18_queue_flush(s, &s->q_busy, q_idle);253cx18_queue_flush(s, &s->q_full, q_idle);254cx18_queue_flush(s, &s->q_free, q_idle);255256/* Reset MDL id's and move all buffers back to the stream's buf_pool 
*/257spin_lock(&q_idle->lock);258list_for_each_entry(mdl, &q_idle->list, list) {259while (!list_empty(&mdl->buf_list)) {260buf = list_first_entry(&mdl->buf_list,261struct cx18_buffer, list);262list_move_tail(&buf->list, &s->buf_pool);263buf->bytesused = 0;264buf->readpos = 0;265}266mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */267/* all other mdl fields were cleared by cx18_queue_flush() */268}269spin_unlock(&q_idle->lock);270}271272/*273* Note, s->buf_pool is not protected by a lock,274* the stream better not have *anything* going on when calling this275*/276void cx18_load_queues(struct cx18_stream *s)277{278struct cx18 *cx = s->cx;279struct cx18_mdl *mdl;280struct cx18_buffer *buf;281int mdl_id;282int i;283u32 partial_buf_size;284285/*286* Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free287* Excess MDLs are left on q_idle288* Excess buffers are left in buf_pool and/or on an MDL in q_idle289*/290mdl_id = s->mdl_base_idx;291for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;292mdl != NULL && i == s->bufs_per_mdl;293mdl = cx18_dequeue(s, &s->q_idle)) {294295mdl->id = mdl_id;296297for (i = 0; i < s->bufs_per_mdl; i++) {298if (list_empty(&s->buf_pool))299break;300301buf = list_first_entry(&s->buf_pool, struct cx18_buffer,302list);303list_move_tail(&buf->list, &mdl->buf_list);304305/* update the firmware's MDL array with this buffer */306cx18_writel(cx, buf->dma_handle,307&cx->scb->cpu_mdl[mdl_id + i].paddr);308cx18_writel(cx, s->buf_size,309&cx->scb->cpu_mdl[mdl_id + i].length);310}311312if (i == s->bufs_per_mdl) {313/*314* The encoder doesn't honor s->mdl_size. 
So in the315* case of a non-integral number of buffers to meet316* mdl_size, we lie about the size of the last buffer317* in the MDL to get the encoder to really only send318* us mdl_size bytes per MDL transfer.319*/320partial_buf_size = s->mdl_size % s->buf_size;321if (partial_buf_size) {322cx18_writel(cx, partial_buf_size,323&cx->scb->cpu_mdl[mdl_id + i - 1].length);324}325cx18_enqueue(s, mdl, &s->q_free);326} else {327/* Not enough buffers for this MDL; we won't use it */328cx18_push(s, mdl, &s->q_idle);329}330mdl_id += i;331}332}333334void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)335{336int dma = s->dma;337u32 buf_size = s->buf_size;338struct pci_dev *pci_dev = s->cx->pci_dev;339struct cx18_buffer *buf;340341list_for_each_entry(buf, &mdl->buf_list, list)342pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,343buf_size, dma);344}345346int cx18_stream_alloc(struct cx18_stream *s)347{348struct cx18 *cx = s->cx;349int i;350351if (s->buffers == 0)352return 0;353354CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers "355"(%d.%02d kB total)\n",356s->name, s->buffers, s->buf_size,357s->buffers * s->buf_size / 1024,358(s->buffers * s->buf_size * 100 / 1024) % 100);359360if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -361(char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {362unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -363((char __iomem *)cx->scb->cpu_mdl));364365CX18_ERR("Too many buffers, cannot fit in SCB area\n");366CX18_ERR("Max buffers = %zd\n",367bufsz / sizeof(struct cx18_mdl_ent));368return -ENOMEM;369}370371s->mdl_base_idx = cx->free_mdl_idx;372373/* allocate stream buffers and MDLs */374for (i = 0; i < s->buffers; i++) {375struct cx18_mdl *mdl;376struct cx18_buffer *buf;377378/* 1 MDL per buffer to handle the worst & also default case */379mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);380if (mdl == NULL)381break;382383buf = kzalloc(sizeof(struct 
cx18_buffer),384GFP_KERNEL|__GFP_NOWARN);385if (buf == NULL) {386kfree(mdl);387break;388}389390buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);391if (buf->buf == NULL) {392kfree(mdl);393kfree(buf);394break;395}396397INIT_LIST_HEAD(&mdl->list);398INIT_LIST_HEAD(&mdl->buf_list);399mdl->id = s->mdl_base_idx; /* a somewhat safe value */400cx18_enqueue(s, mdl, &s->q_idle);401402INIT_LIST_HEAD(&buf->list);403buf->dma_handle = pci_map_single(s->cx->pci_dev,404buf->buf, s->buf_size, s->dma);405cx18_buf_sync_for_cpu(s, buf);406list_add_tail(&buf->list, &s->buf_pool);407}408if (i == s->buffers) {409cx->free_mdl_idx += s->buffers;410return 0;411}412CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);413cx18_stream_free(s);414return -ENOMEM;415}416417void cx18_stream_free(struct cx18_stream *s)418{419struct cx18_mdl *mdl;420struct cx18_buffer *buf;421struct cx18 *cx = s->cx;422423CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);424425/* move all buffers to buf_pool and all MDLs to q_idle */426cx18_unload_queues(s);427428/* empty q_idle */429while ((mdl = cx18_dequeue(s, &s->q_idle)))430kfree(mdl);431432/* empty buf_pool */433while (!list_empty(&s->buf_pool)) {434buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);435list_del_init(&buf->list);436437pci_unmap_single(s->cx->pci_dev, buf->dma_handle,438s->buf_size, s->dma);439kfree(buf->buf);440kfree(buf);441}442}443444445