GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/mmc/card/queue.c
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

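/*
 * Default bounce buffer size (64 KiB) for hosts that can only handle a
 * single segment, and the flag bit used to mark a queue as suspended.
 */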
#define MMC_QUEUE_BOUNCESZ 65536

#define MMC_QUEUE_SUSPENDED (1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        /*
         * We only like normal block requests and discards.
         */
        if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

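/*
 * Worker thread, one per queue.  It fetches requests under the queue
 * lock and hands them to mq->issue_fn(); when the queue is empty it
 * releases thread_sem and sleeps until mmc_request() wakes it up.
 * PF_MEMALLOC is set so that writeback to the card cannot deadlock on
 * memory reclaim, and holding thread_sem while a request is in flight
 * is what lets mmc_queue_suspend() wait for that request to finish.
 */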
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->req = req;
                spin_unlock_irq(q->queue_lock);

                if (!req) {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                        continue;
                }
                set_current_state(TASK_RUNNING);

                mq->issue_fn(mq, req);
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        if (!mq->req)
                wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = *mmc_dev(host)->dma_mask;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->queue->queuedata = mq;
        mq->req = NULL;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        if (mmc_can_erase(card)) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
                mq->queue->limits.max_discard_sectors = UINT_MAX;
                if (card->erased_byte == 0)
                        mq->queue->limits.discard_zeroes_data = 1;
                mq->queue->limits.discard_granularity = card->pref_erase << 9;
                if (mmc_can_secure_erase_trim(card))
                        queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                                mq->queue);
        }

#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mq->bounce_buf) {
                                printk(KERN_WARNING "%s: unable to "
                                        "allocate bounce buffer\n",
                                        mmc_card_name(card));
                        }
                }

                if (mq->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mq->sg = kmalloc(sizeof(struct scatterlist),
                                GFP_KERNEL);
                        if (!mq->sg) {
                                ret = -ENOMEM;
                                goto cleanup_queue;
                        }
                        sg_init_table(mq->sg, 1);

                        mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                                bouncesz / 512, GFP_KERNEL);
                        if (!mq->bounce_sg) {
                                ret = -ENOMEM;
                                goto cleanup_queue;
                        }
                        sg_init_table(mq->bounce_sg, bouncesz / 512);
                }
        }
#endif

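        /*
         * No bounce buffer in use (bouncing disabled, not needed, or the
         * allocation failed): expose the host's real DMA and segment
         * limits to the block layer and use a full-size scatterlist.
         */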
        if (!mq->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mq->sg = kmalloc(sizeof(struct scatterlist) *
                        host->max_segs, GFP_KERNEL);
                if (!mq->sg) {
                        ret = -ENOMEM;
                        goto cleanup_queue;
                }
                sg_init_table(mq->sg, host->max_segs);
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        if (mq->bounce_sg)
                kfree(mq->bounce_sg);
        mq->bounce_sg = NULL;
 cleanup_queue:
        if (mq->sg)
                kfree(mq->sg);
        mq->sg = NULL;
        if (mq->bounce_buf)
                kfree(mq->bounce_buf);
        mq->bounce_buf = NULL;
        blk_cleanup_queue(mq->queue);
        return ret;
}
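
/*
 * Illustrative sketch only (not part of this file): a caller such as the
 * MMC block driver would typically initialise the queue and then point
 * issue_fn at its own request handler, roughly:
 *
 *      ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *      if (ret)
 *              goto out;
 *      md->queue.issue_fn = mmc_blk_issue_rq;
 *      md->queue.data = md;
 *
 * The names md, mmc_blk_issue_rq and the error label are assumptions
 * here; see drivers/mmc/card/block.c for the real caller.
 */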

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (mq->bounce_sg)
                kfree(mq->bounce_sg);
        mq->bounce_sg = NULL;

        kfree(mq->sg);
        mq->sg = NULL;

        if (mq->bounce_buf)
                kfree(mq->bounce_buf);
        mq->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

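                /*
                 * Taking thread_sem blocks until mmc_queue_thread() has
                 * finished issuing any request currently in flight, so we
                 * never suspend in the middle of a request.
                 */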
                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        int i;

        if (!mq->bounce_buf)
                return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

        BUG_ON(!mq->bounce_sg);

        sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

        mq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        /*
         * Collapse the request into a single scatterlist entry covering
         * the bounce buffer, so the host driver sees one segment.
         */
        sg_init_one(mq->sg, mq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
        if (!mq->bounce_buf)
                return;

        if (rq_data_dir(mq->req) != WRITE)
                return;

        sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
                mq->bounce_buf, mq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
        if (!mq->bounce_buf)
                return;

        if (rq_data_dir(mq->req) != READ)
                return;

        sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
                mq->bounce_buf, mq->sg[0].length);
}
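
/*
 * Typical use of the helpers above from an issue_fn implementation
 * (illustrative sketch only; the brq/mrq names are assumptions modelled
 * on the MMC block driver in drivers/mmc/card/block.c):
 *
 *      brq.data.sg = mq->sg;
 *      brq.data.sg_len = mmc_queue_map_sg(mq);
 *      mmc_queue_bounce_pre(mq);
 *      mmc_wait_for_req(card->host, &brq.mrq);
 *      mmc_queue_bounce_post(mq);
 *
 * Writes are copied into bounce_buf before the transfer and reads are
 * copied back out afterwards; without a bounce buffer both bounce calls
 * are no-ops and the request's own scatterlist is used directly.
 */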