Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/block/blk-integrity.c
26242 views
1
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <[email protected]>
 */
8
9
#include <linux/blk-integrity.h>
10
#include <linux/backing-dev.h>
11
#include <linux/mempool.h>
12
#include <linux/bio.h>
13
#include <linux/scatterlist.h>
14
#include <linux/export.h>
15
#include <linux/slab.h>
16
#include <linux/t10-pi.h>
17
18
#include "blk.h"
19
20
/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q: request queue
 * @bio: bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;		/* set once we have a previous vector to merge with */

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			/*
			 * Merge into the current segment only if the two
			 * vectors are physically mergeable and the combined
			 * length stays within the queue's segment size limit.
			 */
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			/* start counting a fresh segment at this vector */
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
57
58
/*
 * blk_get_meta_cap() - report logical block metadata capabilities
 * @bdev: block device being queried
 * @cmd:  raw ioctl command, validated against FS_IOC_GETLBMD_CAP
 * @argp: user buffer receiving a struct logical_block_metadata_cap
 *
 * Fills in the metadata/protection-information capabilities of @bdev.
 * A device without an integrity profile reports an all-zero structure.
 * copy_struct_to_user() reconciles differing kernel/user struct sizes,
 * so older and newer userspace layouts both work.
 *
 * Returns 0 on success, -ENOIOCTLCMD if @cmd is not a valid
 * FS_IOC_GETLBMD_CAP variant, or a negative errno from the user copy.
 */
int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
		struct logical_block_metadata_cap __user *argp)
{
	struct blk_integrity *bi = blk_get_integrity(bdev->bd_disk);
	struct logical_block_metadata_cap meta_cap = {};
	size_t usize = _IOC_SIZE(cmd);

	/* accept any size >= the version-0 layout to stay extensible */
	if (_IOC_DIR(cmd) != _IOC_DIR(FS_IOC_GETLBMD_CAP) ||
	    _IOC_TYPE(cmd) != _IOC_TYPE(FS_IOC_GETLBMD_CAP) ||
	    _IOC_NR(cmd) != _IOC_NR(FS_IOC_GETLBMD_CAP) ||
	    _IOC_SIZE(cmd) < LBMD_SIZE_VER0)
		return -ENOIOCTLCMD;

	if (!bi)
		goto out;	/* no integrity profile: report all zeroes */

	if (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_INTEGRITY;
	if (bi->flags & BLK_INTEGRITY_REF_TAG)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_REFTAG;
	meta_cap.lbmd_interval = 1 << bi->interval_exp;
	meta_cap.lbmd_size = bi->metadata_size;
	meta_cap.lbmd_pi_size = bi->pi_tuple_size;
	meta_cap.lbmd_pi_offset = bi->pi_offset;
	meta_cap.lbmd_opaque_size = bi->metadata_size - bi->pi_tuple_size;
	/* when PI sits at the front, the opaque region follows the PI tuple */
	if (meta_cap.lbmd_opaque_size && !bi->pi_offset)
		meta_cap.lbmd_opaque_offset = bi->pi_tuple_size;

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_NONE;
		break;
	case BLK_INTEGRITY_CSUM_IP:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_IP;
		break;
	case BLK_INTEGRITY_CSUM_CRC:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC16_T10DIF;
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC64_NVME;
		break;
	}

	/* checksumming formats carry a 2-byte application tag */
	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
		meta_cap.lbmd_app_tag_size = 2;

	if (bi->flags & BLK_INTEGRITY_REF_TAG) {
		/* ref tag width depends on the PI tuple layout in use */
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct crc64_pi_tuple, ref_tag);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct t10_pi_tuple, ref_tag);
			break;
		default:
			break;
		}
	}

out:
	return copy_struct_to_user(argp, usize, &meta_cap, sizeof(meta_cap),
				   NULL);
}
124
125
/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @rq: request to map
 * @sglist: target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist. The scatterlist must be big enough to hold all
 * elements. I.e. sized using blk_rq_count_integrity_sg() or
 * rq->nr_integrity_segments.
 */
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct request_queue *q = rq->q;
	struct scatterlist *sg = NULL;
	struct bio *bio = rq->bio;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;		/* set once we have a previous vector to merge with */

	bio_for_each_integrity_vec(iv, bio, iter) {
		if (prev) {
			/*
			 * Extend the current sg entry only if the vectors are
			 * physically mergeable and the combined length stays
			 * within the queue's segment size limit.
			 */
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			/* first segment uses sglist directly; later ones advance */
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have been wrong if the figured number of segment
	 * is bigger than number of req's physical integrity segments
	 */
	BUG_ON(segments > rq->nr_integrity_segments);
	BUG_ON(segments > queue_max_integrity_segments(q));
	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
182
183
/*
 * Map a user-space integrity buffer onto @rq's bio and mark the request
 * as carrying integrity payload.  Returns 0 on success or the negative
 * errno from bio_integrity_map_user().
 */
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes)
{
	struct iov_iter iov;
	int error;

	iov_iter_ubuf(&iov, rq_data_dir(rq), ubuf, bytes);

	error = bio_integrity_map_user(rq->bio, &iov);
	if (error)
		return error;

	rq->cmd_flags |= REQ_INTEGRITY;
	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
199
200
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
201
struct request *next)
202
{
203
if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
204
return true;
205
206
if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
207
return false;
208
209
if (bio_integrity(req->bio)->bip_flags !=
210
bio_integrity(next->bio)->bip_flags)
211
return false;
212
213
if (req->nr_integrity_segments + next->nr_integrity_segments >
214
q->limits.max_integrity_segments)
215
return false;
216
217
if (integrity_req_gap_back_merge(req, next->bio))
218
return false;
219
220
return true;
221
}
222
223
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
224
struct bio *bio)
225
{
226
int nr_integrity_segs;
227
228
if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
229
return true;
230
231
if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
232
return false;
233
234
if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
235
return false;
236
237
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
238
if (req->nr_integrity_segments + nr_integrity_segs >
239
q->limits.max_integrity_segments)
240
return false;
241
242
return true;
243
}
244
245
static inline struct blk_integrity *dev_to_bi(struct device *dev)
246
{
247
return &dev_to_disk(dev)->queue->limits.integrity;
248
}
249
250
const char *blk_integrity_profile_name(struct blk_integrity *bi)
251
{
252
switch (bi->csum_type) {
253
case BLK_INTEGRITY_CSUM_IP:
254
if (bi->flags & BLK_INTEGRITY_REF_TAG)
255
return "T10-DIF-TYPE1-IP";
256
return "T10-DIF-TYPE3-IP";
257
case BLK_INTEGRITY_CSUM_CRC:
258
if (bi->flags & BLK_INTEGRITY_REF_TAG)
259
return "T10-DIF-TYPE1-CRC";
260
return "T10-DIF-TYPE3-CRC";
261
case BLK_INTEGRITY_CSUM_CRC64:
262
if (bi->flags & BLK_INTEGRITY_REF_TAG)
263
return "EXT-DIF-TYPE1-CRC64";
264
return "EXT-DIF-TYPE3-CRC64";
265
case BLK_INTEGRITY_CSUM_NONE:
266
break;
267
}
268
269
return "nop";
270
}
271
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
272
273
/*
 * Common sysfs store helper: parse a 0/1 value and update @flag in the
 * queue's integrity limits under a frozen limits update.
 */
static ssize_t flag_store(struct device *dev, const char *page, size_t count,
			  unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long enable;
	int ret;

	ret = kstrtoul(page, 10, &enable);
	if (ret)
		return ret;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (enable)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	ret = queue_limits_commit_update_frozen(q, &lim);
	if (ret)
		return ret;
	return count;
}
297
298
static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
299
{
300
struct blk_integrity *bi = dev_to_bi(dev);
301
302
return sysfs_emit(page, "%d\n", !(bi->flags & flag));
303
}
304
305
static ssize_t format_show(struct device *dev, struct device_attribute *attr,
306
char *page)
307
{
308
struct blk_integrity *bi = dev_to_bi(dev);
309
310
if (!bi->metadata_size)
311
return sysfs_emit(page, "none\n");
312
return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
313
}
314
315
static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
316
char *page)
317
{
318
struct blk_integrity *bi = dev_to_bi(dev);
319
320
return sysfs_emit(page, "%u\n", bi->tag_size);
321
}
322
323
static ssize_t protection_interval_bytes_show(struct device *dev,
324
struct device_attribute *attr,
325
char *page)
326
{
327
struct blk_integrity *bi = dev_to_bi(dev);
328
329
return sysfs_emit(page, "%u\n",
330
bi->interval_exp ? 1 << bi->interval_exp : 0);
331
}
332
333
/* Writing 1 enables read verification (clears the NOVERIFY flag). */
static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}
339
340
/* Shows 1 when read verification is enabled (NOVERIFY flag clear). */
static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}
345
346
/* Writing 1 enables write generation (clears the NOGENERATE flag). */
static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}
352
353
/* Shows 1 when write generation is enabled (NOGENERATE flag clear). */
static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}
358
359
static ssize_t device_is_integrity_capable_show(struct device *dev,
360
struct device_attribute *attr,
361
char *page)
362
{
363
struct blk_integrity *bi = dev_to_bi(dev);
364
365
return sysfs_emit(page, "%u\n",
366
!!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
367
}
368
369
/* Read-only capability attributes */
static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
/* Writable knobs toggling in-kernel verification/generation */
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

/* sysfs group named "integrity" on the disk device */
const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};
390
391