GitHub Repository: torvalds/linux
Path: blob/master/block/blk-settings.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/t10-pi.h>
#include <linux/crc64.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        memset(lim, 0, sizeof(*lim));
        lim->logical_block_size = SECTOR_SIZE;
        lim->physical_block_size = SECTOR_SIZE;
        lim->io_min = SECTOR_SIZE;
        lim->discard_granularity = SECTOR_SIZE;
        lim->dma_alignment = SECTOR_SIZE - 1;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
        lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
        lim->max_hw_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
        lim->atomic_write_hw_max = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
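
/*
 * Illustrative sketch (not part of the original file): a stacking driver
 * such as MD or DM would typically reset its limits with
 * blk_set_stacking_limits() and then fold in each component device, e.g.:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	(for each component bdev at data offset 'offset')
 *	queue_limits_stack_bdev(&lim, bdev, offset, disk->disk_name);
 *	(then apply the combined limits to the stacked queue)
 *
 * 'bdev', 'offset' and 'disk' above are placeholders, not code from this
 * file; see blk_stack_limits() and queue_limits_stack_bdev() below.
 */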

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
                struct queue_limits *lim)
{
        u64 io_opt = lim->io_opt;

        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size. For rotational devices that do
         * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
         * size to avoid falling back to the (rather inefficient) small default
         * read-ahead size.
         *
         * There is no hardware limitation for the read-ahead size and the user
         * might have increased the read-ahead size through sysfs, so don't ever
         * decrease it.
         */
        if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
                io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;

        bdi->ra_pages = max3(bdi->ra_pages,
                        io_opt * 2 >> PAGE_SHIFT,
                        VM_READAHEAD_PAGES);
        bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
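
/*
 * Illustrative example (not from the original source): with 4 KiB pages,
 * a device reporting io_opt = 512 KiB gets ra_pages raised to at least
 * (2 * 512 KiB) / 4 KiB = 256 pages, assuming neither the existing
 * bdi->ra_pages value nor VM_READAHEAD_PAGES is already larger.
 */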

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
        if (!(lim->features & BLK_FEAT_ZONED)) {
                if (WARN_ON_ONCE(lim->max_open_zones) ||
                    WARN_ON_ONCE(lim->max_active_zones) ||
                    WARN_ON_ONCE(lim->zone_write_granularity) ||
                    WARN_ON_ONCE(lim->max_zone_append_sectors))
                        return -EINVAL;
                return 0;
        }

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
                return -EINVAL;

        /*
         * Given that active zones include open zones, the maximum number of
         * open zones cannot be larger than the maximum number of active zones.
         */
        if (lim->max_active_zones &&
            lim->max_open_zones > lim->max_active_zones)
                return -EINVAL;

        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;

        /*
         * The Zone Append size is limited by the maximum I/O size and the zone
         * size, given that it can't span zones.
         *
         * If no max_hw_zone_append_sectors limit is provided, the block layer
         * will emulate it; otherwise we're also bound by the hardware limit.
         */
        lim->max_zone_append_sectors =
                min_not_zero(lim->max_hw_zone_append_sectors,
                        min(lim->chunk_sectors, lim->max_hw_sectors));
        return 0;
}
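
/*
 * Illustrative example (not from the original source): for a zoned device
 * with chunk_sectors = 524288 (256 MiB zones), max_hw_sectors = 2048
 * (1 MiB) and no max_hw_zone_append_sectors reported (i.e. zone append is
 * emulated by the block layer), the computation above yields
 * max_zone_append_sectors = min_not_zero(0, min(524288, 2048)) = 2048.
 */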

/*
 * Maximum size of I/O that needs a block layer integrity buffer. Limited
 * by the number of intervals for which we can fit the integrity metadata
 * into the maximum integrity buffer size. Because the buffer is a single
 * segment it is also limited by the maximum segment size.
 */
static inline unsigned int max_integrity_io_size(struct queue_limits *lim)
{
        return min_t(unsigned int, lim->max_segment_size,
                (BLK_INTEGRITY_MAX_SIZE / lim->integrity.metadata_size) <<
                        lim->integrity.interval_exp);
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
        struct blk_integrity *bi = &lim->integrity;

        if (!bi->metadata_size) {
                if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
                    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
                        pr_warn("invalid PI settings.\n");
                        return -EINVAL;
                }
                bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
                return 0;
        }

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
                pr_warn("integrity support disabled.\n");
                return -EINVAL;
        }

        if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
            (bi->flags & BLK_INTEGRITY_REF_TAG)) {
                pr_warn("ref tag not supported without checksum.\n");
                return -EINVAL;
        }

        if (bi->pi_offset + bi->pi_tuple_size > bi->metadata_size) {
                pr_warn("pi_offset (%u) + pi_tuple_size (%u) exceeds metadata_size (%u)\n",
                        bi->pi_offset, bi->pi_tuple_size, bi->metadata_size);
                return -EINVAL;
        }

        switch (bi->csum_type) {
        case BLK_INTEGRITY_CSUM_NONE:
                if (bi->pi_tuple_size) {
                        pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
                        return -EINVAL;
                }
                break;
        case BLK_INTEGRITY_CSUM_CRC:
        case BLK_INTEGRITY_CSUM_IP:
                if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
                        pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
                                sizeof(struct t10_pi_tuple),
                                bi->pi_tuple_size);
                        return -EINVAL;
                }
                break;
        case BLK_INTEGRITY_CSUM_CRC64:
                if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
                        pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
                                sizeof(struct crc64_pi_tuple),
                                bi->pi_tuple_size);
                        return -EINVAL;
                }
                break;
        }

        if (!bi->interval_exp) {
                bi->interval_exp = ilog2(lim->logical_block_size);
        } else if (bi->interval_exp < SECTOR_SHIFT ||
                   bi->interval_exp > ilog2(lim->logical_block_size)) {
                pr_warn("invalid interval_exp %u\n", bi->interval_exp);
                return -EINVAL;
        }

        /*
         * The PI generation / validation helpers do not expect intervals to
         * straddle multiple bio_vecs. Enforce alignment so that those are
         * never generated, and that each buffer is aligned as expected.
         */
        if (bi->csum_type) {
                lim->dma_alignment = max(lim->dma_alignment,
                                (1U << bi->interval_exp) - 1);
        }

        /*
         * The block layer automatically adds integrity data for bios that don't
         * already have it. Limit the I/O size so that a single maximum size
         * metadata segment can cover the integrity data for the entire I/O.
         */
        lim->max_sectors = min(lim->max_sectors,
                        max_integrity_io_size(lim) >> SECTOR_SHIFT);

        return 0;
}

/*
 * Returns the max guaranteed bytes which we can fit in a bio.
 *
 * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
        unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
        unsigned int length;

        length = min(max_segments, 2) * lim->logical_block_size;
        if (max_segments > 2)
                length += (max_segments - 2) * PAGE_SIZE;

        return length;
}
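
/*
 * Illustrative example (not from the original source): with 4 KiB pages,
 * a 512-byte logical block size and max_segments >= BIO_MAX_VECS
 * (currently 256), the guaranteed size is 2 * 512 + 254 * 4096 =
 * 1041408 bytes (1017 KiB), since only the first and last vectors are
 * assumed to be as small as one logical block.
 */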

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
        unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
                                blk_queue_max_guaranteed_bio(lim));

        unit_limit = rounddown_pow_of_two(unit_limit);

        lim->atomic_write_max_sectors =
                min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
                    lim->max_hw_sectors);
        lim->atomic_write_unit_min =
                min(lim->atomic_write_hw_unit_min, unit_limit);
        lim->atomic_write_unit_max =
                min(lim->atomic_write_hw_unit_max, unit_limit);
        lim->atomic_write_boundary_sectors =
                lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

/*
 * Test whether any boundary is aligned with any chunk size. Stacked
 * devices store any stripe size in t->chunk_sectors.
 */
static bool blk_valid_atomic_writes_boundary(unsigned int chunk_sectors,
                                unsigned int boundary_sectors)
{
        if (!chunk_sectors || !boundary_sectors)
                return true;

        if (boundary_sectors > chunk_sectors &&
            boundary_sectors % chunk_sectors)
                return false;

        if (chunk_sectors > boundary_sectors &&
            chunk_sectors % boundary_sectors)
                return false;

        return true;
}
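
/*
 * Illustrative example (not from the original source): a 16-sector
 * boundary is compatible with a 64-sector stripe (64 % 16 == 0), and a
 * 64-sector boundary is compatible with a 16-sector stripe (64 % 16 == 0),
 * but a 24-sector stripe with a 16-sector boundary is rejected by the
 * check above, since 24 % 16 != 0.
 */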

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
        unsigned int boundary_sectors;
        unsigned int atomic_write_hw_max_sectors =
                lim->atomic_write_hw_max >> SECTOR_SHIFT;

        if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;

        /* UINT_MAX indicates stacked limits in initial state */
        if (lim->atomic_write_hw_max == UINT_MAX)
                goto unsupported;

        if (!lim->atomic_write_hw_max)
                goto unsupported;

        if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
                goto unsupported;

        if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
                goto unsupported;

        if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
                         lim->atomic_write_hw_unit_max))
                goto unsupported;

        if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
                         lim->atomic_write_hw_max))
                goto unsupported;

        if (WARN_ON_ONCE(lim->chunk_sectors &&
                         atomic_write_hw_max_sectors > lim->chunk_sectors))
                goto unsupported;

        boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

        if (boundary_sectors) {
                if (WARN_ON_ONCE(lim->atomic_write_hw_max >
                                 lim->atomic_write_hw_boundary))
                        goto unsupported;

                if (WARN_ON_ONCE(!blk_valid_atomic_writes_boundary(
                                lim->chunk_sectors, boundary_sectors)))
                        goto unsupported;

                /*
                 * The boundary size just needs to be a multiple of unit_max
                 * (and not necessarily a power-of-2), so the following check
                 * could be relaxed in the future.
                 * Furthermore, if needed, unit_max could even be reduced so
                 * that it is compliant with a !power-of-2 boundary.
                 */
                if (!is_power_of_2(boundary_sectors))
                        goto unsupported;
        }

        blk_atomic_writes_update_limits(lim);
        return;

unsupported:
        lim->atomic_write_max_sectors = 0;
        lim->atomic_write_boundary_sectors = 0;
        lim->atomic_write_unit_min = 0;
        lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
        unsigned int max_hw_sectors;
        unsigned int logical_block_sectors;
        unsigned long seg_size;
        int err;

        /*
         * Unless otherwise specified, default to 512 byte logical blocks and a
         * physical block size equal to the logical block size.
         */
        if (!lim->logical_block_size)
                lim->logical_block_size = SECTOR_SIZE;
        else if (blk_validate_block_size(lim->logical_block_size)) {
                pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
                return -EINVAL;
        }
        if (lim->physical_block_size < lim->logical_block_size) {
                lim->physical_block_size = lim->logical_block_size;
        } else if (!is_power_of_2(lim->physical_block_size)) {
                pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
                return -EINVAL;
        }

        /*
         * The minimum I/O size defaults to the physical block size unless
         * explicitly overridden.
         */
        if (lim->io_min < lim->physical_block_size)
                lim->io_min = lim->physical_block_size;

        /*
         * The optimal I/O size may not be aligned to the physical block size
         * (because it may be limited by dma engines which have no clue about
         * the block size of the disks attached to them), so we round it down
         * here.
         */
        lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

        /*
         * max_hw_sectors has a somewhat weird default for historical reasons,
         * but drivers really should set their own instead of relying on this
         * value.
         *
         * The block layer relies on the fact that every driver can
         * handle at least a page worth of data per I/O, and needs the value
         * aligned to the logical block size.
         */
        if (!lim->max_hw_sectors)
                lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
                return -EINVAL;
        logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
        if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
                return -EINVAL;
        lim->max_hw_sectors = round_down(lim->max_hw_sectors,
                        logical_block_sectors);

        /*
         * The actual max_sectors value is a complex beast and also takes the
         * max_dev_sectors value (set by SCSI ULPs) and a user configurable
         * value into account. The ->max_sectors value is always calculated
         * from these, so directly setting it won't have any effect.
         */
        max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                        lim->max_dev_sectors);
        if (lim->max_user_sectors) {
                if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
                        return -EINVAL;
                lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
        } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
        } else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
        } else {
                lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
        }
        lim->max_sectors = round_down(lim->max_sectors,
                        logical_block_sectors);

        /*
         * Random default for the maximum number of segments. Drivers should
         * not rely on this and should set their own.
         */
        if (!lim->max_segments)
                lim->max_segments = BLK_MAX_SEGMENTS;

        if (lim->max_hw_wzeroes_unmap_sectors &&
            lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
                return -EINVAL;
        lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
                        lim->max_user_wzeroes_unmap_sectors);

        lim->max_discard_sectors =
                min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

        /*
         * When discard is not supported, discard_granularity should be
         * reported as 0 to userspace.
         */
        if (lim->max_discard_sectors)
                lim->discard_granularity =
                        max(lim->discard_granularity, lim->physical_block_size);
        else
                lim->discard_granularity = 0;

        if (!lim->max_discard_segments)
                lim->max_discard_segments = 1;

        /*
         * By default there is no limit on the segment boundary alignment,
         * but if there is one it can't be smaller than the page size as
         * that would break all the normal I/O patterns.
         */
        if (!lim->seg_boundary_mask)
                lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
                return -EINVAL;

        /*
         * Stacking devices may have both a virtual boundary and a max segment
         * size limit, so allow this setting now, and long-term the two
         * might need to move out of the stacking limits since we have
         * immutable bvecs and lower layer bio splitting is supposed to handle
         * the two correctly.
         */
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else {
                /*
                 * The maximum segment size has an odd historic 64k default that
                 * drivers probably should override. Just like the I/O size we
                 * require drivers to at least handle a full page per segment.
                 */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
                if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
                        return -EINVAL;
        }

        /* setup max segment size for building new segment in fast path */
        if (lim->seg_boundary_mask > lim->max_segment_size - 1)
                seg_size = lim->max_segment_size;
        else
                seg_size = lim->seg_boundary_mask + 1;
        lim->max_fast_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

        /*
         * We require drivers to at least do logical block aligned I/O, but
         * historically could not check for that due to the separate calls
         * to set the limits. Once the transition is finished the check
         * below should be narrowed down to check the logical block size.
         */
        if (!lim->dma_alignment)
                lim->dma_alignment = SECTOR_SIZE - 1;
        if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
                return -EINVAL;

        if (lim->alignment_offset) {
                lim->alignment_offset &= (lim->physical_block_size - 1);
                lim->flags &= ~BLK_FLAG_MISALIGNED;
        }

        if (!(lim->features & BLK_FEAT_WRITE_CACHE))
                lim->features &= ~BLK_FEAT_FUA;

        blk_validate_atomic_write_limits(lim);

        err = blk_validate_integrity_limits(lim);
        if (err)
                return err;
        return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which may specify no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
        /*
         * Most defaults are set by capping the bounds in blk_validate_limits,
         * but these limits are special and need an explicit initialization to
         * the max value here.
         */
        lim->max_user_discard_sectors = UINT_MAX;
        lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
        return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensured that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
                struct queue_limits *lim)
{
        int error;

        lockdep_assert_held(&q->limits_lock);

        error = blk_validate_limits(lim);
        if (error)
                goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (q->crypto_profile && lim->integrity.tag_size) {
                pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
                error = -EINVAL;
                goto out_unlock;
        }
#endif

        q->limits = *lim;
        if (q->disk)
                blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
        mutex_unlock(&q->limits_lock);
        return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
                struct queue_limits *lim)
{
        unsigned int memflags;
        int ret;

        memflags = blk_mq_freeze_queue(q);
        ret = queue_limits_commit_update(q, lim);
        blk_mq_unfreeze_queue(q, memflags);

        return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
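
/*
 * Illustrative sketch (not part of the original file): a driver changing
 * limits at runtime would typically follow the start/commit pattern named
 * in the kerneldoc above, roughly:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *
 *	lim.max_hw_sectors = new_value;
 *	err = queue_limits_commit_update_frozen(q, &lim);
 *
 * 'new_value' and 'err' are placeholders. queue_limits_start_update()
 * takes q->limits_lock, and the commit helpers drop it again on both the
 * success and error paths.
 */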

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
        mutex_lock(&q->limits_lock);
        return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
                sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}
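
/*
 * Illustrative example (not from the original source): with a granularity
 * of 4096 bytes (8 sectors), alignment_offset = 0 and a partition starting
 * at sector 7, the remainder is 7 sectors (3584 bytes), so the helper
 * returns (4096 + 0 - 3584) % 4096 = 512 bytes: the partition start is
 * 512 bytes short of the next naturally aligned boundary.
 */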

static unsigned int queue_limit_discard_alignment(
                const struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}
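
/*
 * Illustrative example (not from the original source): for a device with a
 * 1 MiB discard granularity (2048 sectors), discard_alignment = 0 and a
 * partition starting at sector 2560, the partition start sits 512 sectors
 * into a granule, so the helper returns (2048 + 0 - 512) % 2048 = 1536
 * sectors, i.e. 786432 bytes until the next discard-aligned boundary.
 */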

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
                struct queue_limits *b)
{
        /* We're not going to support different boundary sizes.. yet */
        if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
                return false;

        /* Can't support this */
        if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
                return false;

        /* Or this */
        if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
                return false;

        t->atomic_write_hw_max = min(t->atomic_write_hw_max,
                        b->atomic_write_hw_max);
        t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
                        b->atomic_write_hw_unit_min);
        t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
                        b->atomic_write_hw_unit_max);
        return true;
}

static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
{
        unsigned int chunk_bytes;

        if (!t->chunk_sectors)
                return;

        /*
         * If chunk sectors is so large that its value in bytes overflows
         * UINT_MAX, then just shift it down so it definitely will fit.
         * We don't support atomic writes of such a large size anyway.
         */
        if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
                chunk_bytes = t->chunk_sectors;

        /*
         * Find values for limits which work for chunk size.
         * b->atomic_write_hw_unit_{min, max} may not be aligned with the chunk
         * size, as the chunk size is not restricted to a power-of-2.
         * So we need to find the highest power-of-2 which works for the chunk
         * size.
         * As an example scenario, we could have t->unit_max = 16K and a
         * chunk size of 24K (t->chunk_sectors = 48). For this case, reduce
         * t->unit_max to a value aligned with both limits, i.e. 8K in this
         * example.
         */
        t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
                        max_pow_of_two_factor(chunk_bytes));

        t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
                        t->atomic_write_hw_unit_max);
        t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
                struct queue_limits *b)
{
        if (!blk_valid_atomic_writes_boundary(t->chunk_sectors,
                        b->atomic_write_hw_boundary >> SECTOR_SHIFT))
                return false;

        t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
        t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
        t->atomic_write_hw_max = b->atomic_write_hw_max;
        t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
        return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
                struct queue_limits *b, sector_t start)
{
        if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;

        if (!b->atomic_write_hw_unit_min)
                goto unsupported;

        if (!blk_atomic_write_start_sect_aligned(start, b))
                goto unsupported;

        /* UINT_MAX indicates no stacking of bottom devices yet */
        if (t->atomic_write_hw_max == UINT_MAX) {
                if (!blk_stack_atomic_writes_head(t, b))
                        goto unsupported;
        } else {
                if (!blk_stack_atomic_writes_tail(t, b))
                        goto unsupported;
        }
        blk_stack_atomic_writes_chunk_sectors(t);
        return;

unsupported:
        t->atomic_write_hw_max = 0;
        t->atomic_write_hw_unit_max = 0;
        t->atomic_write_hw_unit_min = 0;
        t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                sector_t start)
{
        unsigned int top, bottom, alignment;
        int ret = 0;

        t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

        /*
         * Some features need to be supported both by the stacking driver and
         * all underlying devices. The stacking driver sets these flags before
         * stacking the limits, and this will clear the flags if any of the
         * underlying devices does not support it.
         */
        if (!(b->features & BLK_FEAT_NOWAIT))
                t->features &= ~BLK_FEAT_NOWAIT;
        if (!(b->features & BLK_FEAT_POLL))
                t->features &= ~BLK_FEAT_POLL;

        t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_user_sectors = min_not_zero(t->max_user_sectors,
                        b->max_user_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                        b->max_write_zeroes_sectors);
        t->max_user_wzeroes_unmap_sectors =
                        min(t->max_user_wzeroes_unmap_sectors,
                            b->max_user_wzeroes_unmap_sectors);
        t->max_hw_wzeroes_unmap_sectors =
                        min(t->max_hw_wzeroes_unmap_sectors,
                            b->max_hw_wzeroes_unmap_sectors);

        t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
                        b->max_hw_zone_append_sectors);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                        b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                        b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                        b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                        b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                        b->max_segment_size);

        alignment = queue_limit_alignment_offset(b, start);

        /* The bottom device has a different alignment. Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->flags |= BLK_FLAG_MISALIGNED;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                        b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                        b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
                t->chunk_sectors = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                                t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                        b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                        b->zone_write_granularity);
        if (!(t->features & BLK_FEAT_ZONED)) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        blk_stack_atomic_writes_limits(t, b, start);

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                sector_t offset, const char *pfx)
{
        if (blk_stack_limits(t, bdev_limits(bdev),
                        get_start_sect(bdev) + offset))
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                        pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t. Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet, or
 *   b) the integrity profile in @b is identical to the one in @t.
 *
 * If @b can be stacked into @t, return %true. Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
                struct queue_limits *b)
{
        struct blk_integrity *ti = &t->integrity;
        struct blk_integrity *bi = &b->integrity;

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                return true;

        if (ti->flags & BLK_INTEGRITY_STACKED) {
                if (ti->metadata_size != bi->metadata_size)
                        goto incompatible;
                if (ti->interval_exp != bi->interval_exp)
                        goto incompatible;
                if (ti->tag_size != bi->tag_size)
                        goto incompatible;
                if (ti->csum_type != bi->csum_type)
                        goto incompatible;
                if (ti->pi_tuple_size != bi->pi_tuple_size)
                        goto incompatible;
                if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
                    (bi->flags & BLK_INTEGRITY_REF_TAG))
                        goto incompatible;
        } else {
                ti->flags = BLK_INTEGRITY_STACKED;
                ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
                             (bi->flags & BLK_INTEGRITY_REF_TAG);
                ti->csum_type = bi->csum_type;
                ti->pi_tuple_size = bi->pi_tuple_size;
                ti->metadata_size = bi->metadata_size;
                ti->pi_offset = bi->pi_offset;
                ti->interval_exp = bi->interval_exp;
                ti->tag_size = bi->tag_size;
        }
        return true;

incompatible:
        memset(ti, 0, sizeof(*ti));
        return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.flags & BLK_FLAG_MISALIGNED)
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);