GitHub Repository: awilliam/linux-vfio
Path: blob/master/block/blk-sysfs.c
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

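/*
 * Each queue_sysfs_entry describes one attribute file under
 * /sys/block/<disk>/queue/: an attribute name and mode plus optional
 * show and store callbacks that operate on the owning request_queue.
 */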
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

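/*
 * Helpers shared by most attributes: print a single unsigned long, or
 * parse one from the buffer written by user space.  Note that
 * queue_var_store() consumes the whole write and returns @count even
 * if the value did not parse cleanly.
 */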
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}

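/*
 * nr_requests: depth of the request allocation pool.  The store side
 * below re-derives the congestion thresholds and updates the per
 * direction congested/full state so that waiters are woken when the
 * new limit leaves room.
 */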
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        struct request_list *rl = &q->rq;
        unsigned long nr;
        int ret;

        if (!q->request_fn)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);

        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_SYNC);

        if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_ASYNC);
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);

        if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_SYNC);
        } else {
                blk_clear_queue_full(q, BLK_RW_SYNC);
                wake_up(&rl->wait[BLK_RW_SYNC]);
        }

        if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_ASYNC);
        } else {
                blk_clear_queue_full(q, BLK_RW_ASYNC);
                wake_up(&rl->wait[BLK_RW_ASYNC]);
        }
        spin_unlock_irq(q->queue_lock);
        return ret;
}

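/*
 * read_ahead_kb is stored internally as a page count
 * (backing_dev_info.ra_pages), so the show/store paths convert with
 * PAGE_CACHE_SHIFT - 10.  For example, with 4 KiB pages that shift is
 * 2: ra_pages = 32 is reported as 128 (kB), and writing 512 sets
 * ra_pages = 128.
 */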
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}

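/*
 * The limit helpers below report values kept in 512-byte sectors:
 * ">> 1" converts sectors to kB and "<< 9" converts sectors to bytes.
 */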
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));

        return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

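/*
 * max_sectors_kb is the only writable limit here; it must lie between
 * one page and the hardware limit (max_hw_sectors_kb), otherwise the
 * write is rejected with -EINVAL.
 */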
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

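/*
 * QUEUE_SYSFS_BIT_FNS() stamps out a show/store pair for a single
 * queue flag.  When @neg is non-zero the exposed value is inverted;
 * e.g. QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) generates
 * queue_show_nonrot()/queue_store_nonrot(), so the "rotational" file
 * reads 1 while QUEUE_FLAG_NONROT is clear and writing 0 sets it.
 */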
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

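/*
 * nomerges encodes two flags in one value: 0 = all merging enabled,
 * 1 = only the simple one-shot merge with the previous request is
 * attempted (QUEUE_FLAG_NOXMERGES), 2 = merging disabled entirely
 * (QUEUE_FLAG_NOMERGES).
 */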
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

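/*
 * rq_affinity exposes QUEUE_FLAG_SAME_COMP: when set, request
 * completions are steered back to the CPU (group) that submitted the
 * request.  The store side is only compiled in when the generic SMP
 * IPI helpers are available.
 */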
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

        return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        spin_lock_irq(q->queue_lock);
        if (val)
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
        else
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

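/*
 * Attribute definitions.  Each entry below becomes a file under
 * /sys/block/<disk>/queue/; S_IRUGO entries are read-only, while
 * S_IRUGO | S_IWUSR entries may also be written by root, e.g.
 * (example device name):
 *
 *      cat /sys/block/sda/queue/max_sectors_kb
 *      echo 0 > /sys/block/sda/queue/add_random
 */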
static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

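/*
 * NULL-terminated attribute list hooked up below via
 * blk_queue_ktype.default_attrs, so every registered queue exposes the
 * same set of files.
 */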
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

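/*
 * sysfs entry points: resolve the attribute back to its
 * queue_sysfs_entry and the kobject back to its request_queue, then
 * dispatch under q->sysfs_lock.  A queue already marked
 * QUEUE_FLAG_DEAD refuses further access with -ENOENT.
 */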
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

/**
 * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobject of the request queue to be released
 *
 * Description:
 *     blk_cleanup_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;

        blk_sync_queue(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(blk_requestq_cachep, q);
}

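/*
 * The ktype glues everything together: queue_sysfs_ops dispatches
 * show/store, default_attrs supplies the files, and
 * blk_release_queue() runs once the queue's kobject refcount drops to
 * zero.
 */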
static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};

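/*
 * blk_register_queue() is typically invoked from add_disk(); it
 * creates the "queue" directory under the disk's device kobject and,
 * for request-based queues, registers the elevator's sysfs attributes
 * as well.
 */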
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }

        return 0;
}

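/*
 * blk_unregister_queue() undoes blk_register_queue(): it tears down
 * the elevator attributes (request-based queues only), removes the
 * "queue" directory and drops the reference taken on the device
 * kobject at registration time.
 */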
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}