GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/md/dm-delay.c

/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"
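
/*
 * Per-target context: bios may be held on delayed_bios until their
 * expiry time, driven by delay_timer; separate device/offset/delay
 * settings apply to reads and, optionally, to writes.
 */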
struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	atomic_t may_delay;
	mempool_t *delayed_pool;

	struct dm_dev *dev_read;
	sector_t start_read;
	unsigned read_delay;
	unsigned reads;

	struct dm_dev *dev_write;
	sector_t start_write;
	unsigned write_delay;
	unsigned writes;
};

struct dm_delay_info {
	struct delay_c *context;
	struct list_head list;
	struct bio *bio;
	unsigned long expires;
};

static DEFINE_MUTEX(delayed_bios_lock);

static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache;
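
/*
 * Timer callback. Timers fire in atomic (softirq) context, so the
 * actual flushing, which takes mutexes, is deferred to the kdelayd
 * workqueue.
 */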
static void handle_delayed_timer(unsigned long data)
{
	struct delay_c *dc = (struct delay_c *)data;

	queue_work(kdelayd_wq, &dc->flush_expired_bios);
}

static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}
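
/* Submit a chain of bios (linked through bi_next) to the block layer. */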
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}
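
/*
 * Pull expired bios (or, if flush_all is set, every queued bio) off the
 * delayed list and return them as a chain; re-arm the timer for the
 * earliest expiry still pending.
 */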
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	int start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			list_del(&delayed->list);
			bio_list_add(&flush_bios, delayed->bio);
			if ((bio_data_dir(delayed->bio) == WRITE))
				delayed->context->writes--;
			else
				delayed->context->reads--;
			mempool_free(delayed, dc->delayed_pool);
			continue;
		}

		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}

	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_bios(flush_delayed_bios(dc, 0));
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
 *
 * With separate write parameters, the first set is only used for reads.
 * Delays are specified in milliseconds.
 */
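
/*
 * For illustration, the three-argument form delays all I/O (the device
 * name and 500 ms delay below are placeholders, not values taken from
 * this file):
 *
 *   echo "0 `blockdev --getsz /dev/sdX` delay /dev/sdX 0 500" | \
 *     dmsetup create delayed
 *
 * This maps all of /dev/sdX through the delay target, delaying every
 * bio by 500 ms.
 */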
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	unsigned long long tmpll;

	if (argc != 3 && argc != 6) {
		ti->error = "requires exactly 3 or 6 arguments";
		return -EINVAL;
	}

	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	dc->reads = dc->writes = 0;

	if (sscanf(argv[1], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	dc->start_read = tmpll;

	if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
		ti->error = "Invalid delay";
		goto bad;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &dc->dev_read)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	dc->dev_write = NULL;
	if (argc == 3)
		goto out;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid write device sector";
		goto bad_dev_read;
	}
	dc->start_write = tmpll;

	if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
		ti->error = "Invalid write delay";
		goto bad_dev_read;
	}

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
			  &dc->dev_write)) {
		ti->error = "Write device lookup failed";
		goto bad_dev_read;
	}

out:
	dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
	if (!dc->delayed_pool) {
		DMERR("Couldn't create delayed bio pool.");
		goto bad_dev_write;
	}

	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);

	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	atomic_set(&dc->may_delay, 1);

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;
	ti->private = dc;
	return 0;

bad_dev_write:
	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);
bad_dev_read:
	dm_put_device(ti, dc->dev_read);
bad:
	kfree(dc);
	return -EINVAL;
}

static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	flush_workqueue(kdelayd_wq);

	dm_put_device(ti, dc->dev_read);

	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);

	mempool_destroy(dc->delayed_pool);
	kfree(dc);
}
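
/*
 * Queue @bio for delayed submission. Returns 0 if the bio was queued
 * (it will be issued by the worker once the delay expires) or 1 if it
 * should be remapped and submitted immediately, i.e. when the delay is
 * zero or the target is suspending.
 */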
static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!delay || !atomic_read(&dc->may_delay))
		return 1;

	delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);

	delayed->context = dc;
	delayed->bio = bio;
	delayed->expires = expires = jiffies + (delay * HZ / 1000);

	mutex_lock(&delayed_bios_lock);

	if (bio_data_dir(bio) == WRITE)
		dc->writes++;
	else
		dc->reads++;

	list_add_tail(&delayed->list, &dc->delayed_bios);

	mutex_unlock(&delayed_bios_lock);

	queue_timeout(dc, expires);

	return 0;
}
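
/*
 * On suspend, stop accepting new delayed bios and push out everything
 * already queued so nothing remains in flight across the suspend.
 */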
static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);
	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}
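
/*
 * Route writes to the write device when one was configured, otherwise
 * send everything to the read device, applying the matching delay.
 */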
static int delay_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct delay_c *dc = ti->private;

	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
		bio->bi_bdev = dc->dev_write->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = dc->start_write +
					 dm_target_offset(ti, bio->bi_sector);

		return delay_bio(dc, dc->write_delay, bio);
	}

	bio->bi_bdev = dc->dev_read->bdev;
	bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);

	return delay_bio(dc, dc->read_delay, bio);
}

static int delay_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u", dc->reads, dc->writes);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dc->dev_read->name,
		       (unsigned long long) dc->start_read,
		       dc->read_delay);
		if (dc->dev_write)
			DMEMIT(" %s %llu %u", dc->dev_write->name,
			       (unsigned long long) dc->start_write,
			       dc->write_delay);
		break;
	}

	return 0;
}

static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
	if (ret)
		goto out;

	if (dc->dev_write)
		ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);

out:
	return ret;
}

static struct target_type delay_target = {
	.name = "delay",
	.version = {1, 1, 0},
	.module = THIS_MODULE,
	.ctr = delay_ctr,
	.dtr = delay_dtr,
	.map = delay_map,
	.presuspend = delay_presuspend,
	.resume = delay_resume,
	.status = delay_status,
	.iterate_devices = delay_iterate_devices,
};

static int __init dm_delay_init(void)
{
	int r = -ENOMEM;

	kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!kdelayd_wq) {
		DMERR("Couldn't start kdelayd");
		goto bad_queue;
	}

	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
	if (!delayed_cache) {
		DMERR("Couldn't create delayed bio cache.");
		goto bad_memcache;
	}

	r = dm_register_target(&delay_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad_register;
	}

	return 0;

bad_register:
	kmem_cache_destroy(delayed_cache);
bad_memcache:
	destroy_workqueue(kdelayd_wq);
bad_queue:
	return r;
}

static void __exit dm_delay_exit(void)
{
	dm_unregister_target(&delay_target);
	kmem_cache_destroy(delayed_cache);
	destroy_workqueue(kdelayd_wq);
}

/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <[email protected]>");
MODULE_LICENSE("GPL");