/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);
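/*
 * Illustrative sketch only (not part of this file): an externally
 * managed tag map is created once per controller with blk_init_tags()
 * (defined below), shared by each queue behind it through
 * blk_queue_init_tags(), and released with blk_free_tags() once every
 * user is gone.  "my_host" and "MY_HOST_QUEUE_DEPTH" are hypothetical
 * driver-side names:
 *
 *	my_host->bqt = blk_init_tags(MY_HOST_QUEUE_DEPTH);
 *	if (!my_host->bqt)
 *		return -ENOMEM;
 *
 *	(then, for each request queue q behind the controller:)
 *	err = blk_queue_init_tags(q, MY_HOST_QUEUE_DEPTH, my_host->bqt);
 *
 *	(and after all such queues have been released:)
 *	blk_free_tags(my_host->bqt);
 */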
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing on a device, yet leave the
 *	queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
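/*
 * Illustrative sketch only (not part of this file): a driver that wants
 * a private tag map for a single queue passes a NULL @tags and lets
 * blk_queue_init_tags() allocate one; calling it again on an already
 * tagged queue resizes the existing map instead (queue lock held for
 * that case).  The depth of 32 is an arbitrary example value:
 *
 *	err = blk_queue_init_tags(q, 32, NULL);
 *	if (err)
 *		return err;
 */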
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just adjust
	 * max_depth.  *NOTE* as requests with tag values between new_depth
	 * and real_max_depth can be in flight, the tag map cannot be
	 * shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case.
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list, thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
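/*
 * Illustrative sketch only (not part of this file): a typical
 * completion path maps a hardware-reported tag back to its request with
 * blk_queue_find_tag(), releases the tag, and completes the request,
 * the latter two under the queue lock as required above.  "my_complete"
 * is a hypothetical driver helper:
 *
 *	static void my_complete(struct request_queue *q, int hw_tag, int error)
 *	{
 *		struct request *rq = blk_queue_find_tag(q, hw_tag);
 *		unsigned long flags;
 *
 *		if (!rq)
 *			return;
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_queue_end_tag(q, rq);
 *		__blk_end_request_all(rq, error);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 *	}
 */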
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d\n",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
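/*
 * Illustrative sketch only (not part of this file): a tag-aware
 * request_fn (called with the queue lock held) peeks at the next
 * request, tries to tag it (blk_queue_start_tag() dequeues it on
 * success), and stops the queue when the tag space is exhausted so
 * dispatch can resume once a tag is freed.  "my_request_fn" and
 * "my_hw_submit" are hypothetical driver-side names:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq)) {
 *				blk_stop_queue(q);
 *				break;
 *			}
 *			my_hw_submit(q, rq);
 *		}
 *	}
 */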