// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/t10-pi.h>
#include <linux/crc64.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

/**
 * blk_queue_rq_timeout - set the request timeout for the queue
 * @q:		the request queue for the device
 * @timeout:	timeout value in jiffies
 *
 * Sets the timeout that each request on @q is allowed before the block
 * layer's timeout handling kicks in.
 */
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
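
/*
 * Example (illustrative, not part of this file): a driver that wants its
 * requests to time out after 30 seconds would call, after allocating the
 * queue it owns:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 */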

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:	the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
memset(lim, 0, sizeof(*lim));
lim->logical_block_size = SECTOR_SIZE;
lim->physical_block_size = SECTOR_SIZE;
lim->io_min = SECTOR_SIZE;
lim->discard_granularity = SECTOR_SIZE;
lim->dma_alignment = SECTOR_SIZE - 1;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
lim->max_discard_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
lim->max_write_zeroes_sectors = UINT_MAX;
lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
lim->max_hw_zone_append_sectors = UINT_MAX;
lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
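
/*
 * Sketch of the intended call pattern (hypothetical stacking driver; the
 * component list and its bdev/data_offset members are placeholders):
 *
 *	struct queue_limits lim;
 *	int err;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(c, &components, list)
 *		queue_limits_stack_bdev(&lim, c->bdev, c->data_offset,
 *					disk->disk_name);
 *	err = queue_limits_set(disk->queue, &lim);
 */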

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
struct queue_limits *lim)
{
u64 io_opt = lim->io_opt;

	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size. For rotational devices that do
	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
	 * size to avoid falling back to the (rather inefficient) small default
	 * read-ahead size.
	 */
	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;
bdi->ra_pages = max3(bdi->ra_pages,
io_opt * 2 >> PAGE_SHIFT,
VM_READAHEAD_PAGES);
bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
if (!(lim->features & BLK_FEAT_ZONED)) {
if (WARN_ON_ONCE(lim->max_open_zones) ||
WARN_ON_ONCE(lim->max_active_zones) ||
WARN_ON_ONCE(lim->zone_write_granularity) ||
WARN_ON_ONCE(lim->max_zone_append_sectors))
return -EINVAL;
return 0;
}
if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;
if (lim->zone_write_granularity < lim->logical_block_size)
lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size limit cannot be larger than the zone size given
	 * that it is the size of writes issued to a zone.
	 */
	lim->max_zone_append_sectors =
min_not_zero(lim->max_hw_zone_append_sectors,
min(lim->chunk_sectors, lim->max_hw_sectors));
return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
struct blk_integrity *bi = &lim->integrity;

	/*
	 * A device that advertises no metadata must not carry any other
	 * protection information settings; it neither generates nor
	 * verifies integrity data.
	 */
	if (!bi->metadata_size) {
if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
pr_warn("invalid PI settings.\n");
return -EINVAL;
}
bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
return 0;
}
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
pr_warn("integrity support disabled.\n");
return -EINVAL;
}
if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
(bi->flags & BLK_INTEGRITY_REF_TAG)) {
pr_warn("ref tag not support without checksum.\n");
return -EINVAL;
}
if (bi->pi_tuple_size > bi->metadata_size) {
pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
bi->pi_tuple_size,
bi->metadata_size);
return -EINVAL;
}
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_NONE:
if (bi->pi_tuple_size) {
pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
return -EINVAL;
}
break;
case BLK_INTEGRITY_CSUM_CRC:
case BLK_INTEGRITY_CSUM_IP:
if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
sizeof(struct t10_pi_tuple),
bi->pi_tuple_size);
return -EINVAL;
}
break;
case BLK_INTEGRITY_CSUM_CRC64:
if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
sizeof(struct crc64_pi_tuple),
bi->pi_tuple_size);
return -EINVAL;
}
break;
}

	/* Default the protection interval to the logical block size. */
	if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
return 0;
}
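
/*
 * Illustrative example (hypothetical driver, not part of this file): a
 * device with 8 bytes of metadata per logical block carrying a T10 PI
 * tuple (CRC16 guard plus reference tag, i.e. Type 1 protection) would
 * pass the validation above with:
 *
 *	lim.integrity.csum_type = BLK_INTEGRITY_CSUM_CRC;
 *	lim.integrity.metadata_size = 8;
 *	lim.integrity.pi_tuple_size = sizeof(struct t10_pi_tuple);
 *	lim.integrity.flags |= BLK_INTEGRITY_REF_TAG;
 *
 * interval_exp is derived from the logical block size when left zero.
 */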

/*
 * Returns max guaranteed bytes which we can fit in a bio.
 *
 * An atomic write is submitted as a single-vector (ITER_UBUF) iov_iter, so
 * assume that at least PAGE_SIZE fits in each segment apart from the first
 * and last ones.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
unsigned int length;
length = min(max_segments, 2) * lim->logical_block_size;
if (max_segments > 2)
length += (max_segments - 2) * PAGE_SIZE;
return length;
}

/*
 * Derive the software-visible atomic write limits from the hardware limits,
 * capped by what a single bio is guaranteed to be able to carry.
 */
static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
blk_queue_max_guaranteed_bio(lim));
unit_limit = rounddown_pow_of_two(unit_limit);
lim->atomic_write_max_sectors =
min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
lim->max_hw_sectors);
lim->atomic_write_unit_min =
min(lim->atomic_write_hw_unit_min, unit_limit);
lim->atomic_write_unit_max =
min(lim->atomic_write_hw_unit_max, unit_limit);
lim->atomic_write_boundary_sectors =
lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}
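
/*
 * Worked example of the capping above (hypothetical numbers): with
 * max_segments = 4, 512-byte logical blocks and 4 KiB pages,
 * blk_queue_max_guaranteed_bio() returns 2 * 512 + 2 * 4096 = 9216 bytes.
 * Even if max_hw_sectors allows 64 KiB, unit_limit becomes
 * rounddown_pow_of_two(min(65536, 9216)) = 8192, so atomic_write_unit_max
 * can never exceed 8 KiB on such a queue.
 */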

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
unsigned int boundary_sectors;
unsigned int atomic_write_hw_max_sectors =
lim->atomic_write_hw_max >> SECTOR_SHIFT;
if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
goto unsupported;
if (!lim->atomic_write_hw_max)
goto unsupported;
if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
goto unsupported;
if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
goto unsupported;
if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
lim->atomic_write_hw_unit_max))
goto unsupported;
if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
lim->atomic_write_hw_max))
goto unsupported;
if (WARN_ON_ONCE(lim->chunk_sectors &&
atomic_write_hw_max_sectors > lim->chunk_sectors))
goto unsupported;
boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
if (boundary_sectors) {
if (WARN_ON_ONCE(lim->atomic_write_hw_max >
lim->atomic_write_hw_boundary))
goto unsupported;
		/*
		 * A non-zero chunk_sectors must be a multiple of the boundary,
		 * since merging is disallowed across both limits and mixing
		 * misaligned values would make boundary handling needlessly
		 * complicated.  The device supports only one boundary size.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so this check could be
		 * relaxed in future.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}
blk_atomic_writes_update_limits(lim);
return;
unsupported:
lim->atomic_write_max_sectors = 0;
lim->atomic_write_boundary_sectors = 0;
lim->atomic_write_unit_min = 0;
lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
unsigned long seg_size;
int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
lim->logical_block_size = SECTOR_SIZE;
else if (blk_validate_block_size(lim->logical_block_size)) {
pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
return -EINVAL;
}
if (lim->physical_block_size < lim->logical_block_size) {
lim->physical_block_size = lim->logical_block_size;
} else if (!is_power_of_2(lim->physical_block_size)) {
pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
return -EINVAL;
}
if (lim->io_min < lim->physical_block_size)
lim->io_min = lim->physical_block_size;
lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons:
	 * drivers should really set their own value.  The block layer relies
	 * on the fact that every driver can handle at least a page worth of
	 * data per I/O, and needs the value aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
return -EINVAL;
logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
return -EINVAL;
lim->max_hw_sectors = round_down(lim->max_hw_sectors,
logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
lim->max_sectors =
min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
lim->max_sectors =
min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
} else {
lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
}
lim->max_sectors = round_down(lim->max_sectors,
logical_block_sectors);
if (!lim->max_segments)
lim->max_segments = BLK_MAX_SEGMENTS;

	/*
	 * A non-zero hardware "write zeroes with unmap" limit must match the
	 * general write zeroes limit, as unmapping is a mode of the same
	 * operation rather than a separately sized one.
	 */
	if (lim->max_hw_wzeroes_unmap_sectors &&
lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
return -EINVAL;
lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
lim->max_user_wzeroes_unmap_sectors);
lim->max_discard_sectors =
min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
if (lim->max_discard_sectors)
lim->discard_granularity =
max(lim->discard_granularity, lim->physical_block_size);
else
lim->discard_granularity = 0;
if (!lim->max_discard_segments)
lim->max_discard_segments = 1;
if (!lim->seg_boundary_mask)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
return -EINVAL;
if (lim->virt_boundary_mask) {
if (!lim->max_segment_size)
lim->max_segment_size = UINT_MAX;
} else {
if (!lim->max_segment_size)
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
return -EINVAL;
}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
seg_size = lim->max_segment_size;
else
seg_size = lim->seg_boundary_mask + 1;
lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
if (!lim->dma_alignment)
lim->dma_alignment = SECTOR_SIZE - 1;
if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
return -EINVAL;
if (lim->alignment_offset) {
lim->alignment_offset &= (lim->physical_block_size - 1);
lim->flags &= ~BLK_FLAG_MISALIGNED;
}

	/* FUA only makes sense together with a volatile write cache. */
	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
lim->features &= ~BLK_FEAT_FUA;
blk_validate_atomic_write_limits(lim);
err = blk_validate_integrity_limits(lim);
if (err)
return err;
return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);
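
/*
 * Sketch of how a blk-mq driver would feed limits through this validation
 * (illustrative; the values and the tag_set are hypothetical).  The limits
 * passed to blk_mq_alloc_disk() are validated by this function:
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 2048,
 *		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
 *	};
 *	struct gendisk *disk = blk_mq_alloc_disk(&tag_set, &lim, NULL);
 */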

int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but these limits are special and need an explicit initialization to
	 * the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.  The caller must have frozen the queue or
 * ensured that there is no outstanding I/O by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim)
{
int error;
error = blk_validate_limits(lim);
if (error)
goto out_unlock;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
if (q->crypto_profile && lim->integrity.tag_size) {
pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
error = -EINVAL;
goto out_unlock;
}
#endif
q->limits = *lim;
if (q->disk)
blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
mutex_unlock(&q->limits_lock);
return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q.  Freezes the queue
 * before the update and unfreezes it afterwards.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
struct queue_limits *lim)
{
unsigned int memflags;
int ret;
memflags = blk_mq_freeze_queue(q);
ret = queue_limits_commit_update(q, lim);
blk_mq_unfreeze_queue(q, memflags);
return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
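
/*
 * Typical live-update pattern (sketch; assumes the caller owns @q):
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_user_sectors = 1024;
 *	err = queue_limits_commit_update_frozen(q, &lim);
 *
 * queue_limits_start_update() takes q->limits_lock, and the commit helpers
 * drop it on both the success and error paths, so the snapshot must not be
 * committed twice.
 */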

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.  To update
 * existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
mutex_lock(&q->limits_lock);
return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
<< SECTOR_SHIFT;
return (granularity + lim->alignment_offset - alignment) % granularity;
}
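
/*
 * Worked example (hypothetical 4 KiB-sector drive): with
 * physical_block_size = 4096, io_min = 4096 and alignment_offset = 3584,
 * a partition starting at sector 63 yields sector_div(63, 8) = 7, i.e.
 * alignment = 3584 bytes, and (4096 + 3584 - 3584) % 4096 = 0: the
 * partition is correctly aligned despite the odd start sector.
 */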

static unsigned int queue_limit_discard_alignment(
const struct queue_limits *lim, sector_t sector)
{
unsigned int alignment, granularity, offset;
if (!lim->max_discard_sectors)
return 0;
alignment = lim->discard_alignment >> SECTOR_SHIFT;
granularity = lim->discard_granularity >> SECTOR_SHIFT;
offset = sector_div(sector, granularity);
offset = (granularity + alignment - offset) % granularity;
return offset << SECTOR_SHIFT;
}
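
/*
 * Worked example (hypothetical numbers): with discard_granularity = 64 KiB
 * (128 sectors), discard_alignment = 0 and a partition starting at sector
 * 34, sector_div(34, 128) = 34, so the next discard boundary sits
 * (128 + 0 - 34) % 128 = 94 sectors (48128 bytes) into the partition.
 */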

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
sectors = PAGE_SIZE >> SECTOR_SHIFT;
return sectors;
}

/* Stack another bottom device into atomic write limits that are already set */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes.. yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;
t->atomic_write_hw_max = min(t->atomic_write_hw_max,
b->atomic_write_hw_max);
t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
b->atomic_write_hw_unit_min);
t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
b->atomic_write_hw_unit_max);
return true;
}

/* Check for a valid boundary of the first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
				struct queue_limits *b)
{
	/*
	 * Ensure the atomic write boundary is aligned with chunk sectors.
	 * Stacked devices store chunk sectors in t->io_min.
	 */
	if (b->atomic_write_hw_boundary > t->io_min &&
	    b->atomic_write_hw_boundary % t->io_min)
		return false;
if (t->io_min > b->atomic_write_hw_boundary &&
t->io_min % b->atomic_write_hw_boundary)
return false;
t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
return true;
}

static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
{
	unsigned int chunk_bytes;

	if (!t->chunk_sectors)
		return;

	/*
	 * If chunk sectors is so large that its value in bytes overflows
	 * UINT_MAX, then just shift it down so that it definitely will fit.
	 * We don't support atomic writes of such a large size anyway.
	 */
	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
		chunk_bytes = t->chunk_sectors;

	/*
	 * Keep the unit a power-of-two factor of the chunk size so that
	 * atomic writes never straddle a chunk boundary.
	 */
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
max_pow_of_two_factor(chunk_bytes));
t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
t->atomic_write_hw_unit_max);
t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
}

/* Check stacking of the first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
struct queue_limits *b)
{
if (b->atomic_write_hw_boundary &&
!blk_stack_atomic_writes_boundary_head(t, b))
return false;
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
t->atomic_write_hw_max = b->atomic_write_hw_max;
return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
struct queue_limits *b, sector_t start)
{
if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
goto unsupported;
if (!b->atomic_write_hw_unit_min)
goto unsupported;
if (!blk_atomic_write_start_sect_aligned(start, b))
goto unsupported;
if (t->atomic_write_hw_max) {
if (!blk_stack_atomic_writes_tail(t, b))
goto unsupported;
return;
}
if (!blk_stack_atomic_writes_head(t, b))
goto unsupported;
blk_stack_atomic_writes_chunk_sectors(t);
return;
unsupported:
t->atomic_write_hw_max = 0;
t->atomic_write_hw_unit_max = 0;
t->atomic_write_hw_unit_min = 0;
t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t start)
{
unsigned int top, bottom, alignment, ret = 0;
t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
if (!(b->features & BLK_FEAT_NOWAIT))
t->features &= ~BLK_FEAT_NOWAIT;
if (!(b->features & BLK_FEAT_POLL))
t->features &= ~BLK_FEAT_POLL;
t->flags |= (b->flags & BLK_FLAG_MISALIGNED);
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_user_sectors = min_not_zero(t->max_user_sectors,
b->max_user_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
b->max_write_zeroes_sectors);
t->max_user_wzeroes_unmap_sectors =
min(t->max_user_wzeroes_unmap_sectors,
b->max_user_wzeroes_unmap_sectors);
t->max_hw_wzeroes_unmap_sectors =
min(t->max_hw_wzeroes_unmap_sectors,
b->max_hw_wzeroes_unmap_sectors);
t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
b->max_hw_zone_append_sectors);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
b->virt_boundary_mask);
t->max_segments = min_not_zero(t->max_segments, b->max_segments);
t->max_discard_segments = min_not_zero(t->max_discard_segments,
b->max_discard_segments);
t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
b->max_integrity_segments);
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * The bottom device has a different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {
		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}
t->logical_block_size = max(t->logical_block_size,
b->logical_block_size);
t->physical_block_size = max(t->physical_block_size,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
if (b->chunk_sectors)
t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}
t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
b->max_discard_sectors);
t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
b->max_hw_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
t->discard_granularity;
}
t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
b->max_secure_erase_sectors);
t->zone_write_granularity = max(t->zone_write_granularity,
b->zone_write_granularity);
if (!(t->features & BLK_FEAT_ZONED)) {
t->zone_write_granularity = 0;
t->max_zone_append_sectors = 0;
}
blk_stack_atomic_writes_limits(t, b, start);
return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
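
/*
 * Sketch of direct use (hypothetical MD-style caller; most stacking drivers
 * go through queue_limits_stack_bdev() below instead):
 *
 *	blk_set_stacking_limits(&t);
 *	if (blk_stack_limits(&t, &bottom_limits, data_start_sector))
 *		pr_warn("%s: device is misaligned\n", disk->disk_name);
 *
 * A non-zero return only reports misalignment; the limits stacked into @t
 * remain usable, with BLK_FLAG_MISALIGNED set.
 */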

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx)
{
if (blk_stack_limits(t, bdev_limits(bdev),
get_start_sect(bdev) + offset))
pr_notice("%s: Warning: Device %pg is misaligned\n",
pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 *   a) the target @t does not have any integrity information stacked into
 *      it yet, or
 *   b) the integrity profile in @b is identical to the one in @t.
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
struct queue_limits *b)
{
struct blk_integrity *ti = &t->integrity;
struct blk_integrity *bi = &b->integrity;
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
return true;
if (ti->flags & BLK_INTEGRITY_STACKED) {
if (ti->metadata_size != bi->metadata_size)
goto incompatible;
if (ti->interval_exp != bi->interval_exp)
goto incompatible;
if (ti->tag_size != bi->tag_size)
goto incompatible;
if (ti->csum_type != bi->csum_type)
goto incompatible;
if (ti->pi_tuple_size != bi->pi_tuple_size)
goto incompatible;
if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
(bi->flags & BLK_INTEGRITY_REF_TAG))
goto incompatible;
} else {
ti->flags = BLK_INTEGRITY_STACKED;
ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
(bi->flags & BLK_INTEGRITY_REF_TAG);
ti->csum_type = bi->csum_type;
ti->pi_tuple_size = bi->pi_tuple_size;
ti->metadata_size = bi->metadata_size;
ti->pi_offset = bi->pi_offset;
ti->interval_exp = bi->interval_exp;
ti->tag_size = bi->tag_size;
}
return true;
incompatible:
memset(ti, 0, sizeof(*ti));
return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
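
/*
 * Example (illustrative; modelled on how the SCSI midlayer adjusts device
 * queue depth, with sdev standing in for a device that owns the queue):
 *
 *	blk_set_queue_depth(sdev->request_queue, depth);
 *
 * rq_qos_queue_depth_changed() then lets writeback throttling rescale its
 * limits for the new depth.
 */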

/*
 * Return the offset in bytes between the start of @bdev and the device's
 * natural alignment, or -1 if the queue limits are flagged misaligned.
 */
int bdev_alignment_offset(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q->limits.flags & BLK_FLAG_MISALIGNED)
return -1;
if (bdev_is_partition(bdev))
return queue_limit_alignment_offset(&q->limits,
bdev->bd_start_sect);
return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

/*
 * Return the number of bytes from the start of @bdev to the next discard
 * alignment boundary (0 if it is already aligned).
 */
unsigned int bdev_discard_alignment(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (bdev_is_partition(bdev))
return queue_limit_discard_alignment(&q->limits,
bdev->bd_start_sect);
return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);