GitHub Repository: torvalds/linux
Path: blob/master/block/blk-integrity.c

// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <[email protected]>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q: request queue
 * @bio: bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}

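/*
 * blk_get_meta_cap - report logical block metadata capabilities to user space
 *
 * Handler for the FS_IOC_GETLBMD_CAP ioctl: fills a struct
 * logical_block_metadata_cap describing the disk's integrity profile
 * (metadata interval and sizes, guard/app/ref tag layout) and copies as much
 * of it as the caller's structure size allows.  A device without an
 * integrity profile gets an all-zero capability structure back.
 */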
int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
		struct logical_block_metadata_cap __user *argp)
{
	struct blk_integrity *bi;
	struct logical_block_metadata_cap meta_cap = {};
	size_t usize = _IOC_SIZE(cmd);

	if (!extensible_ioctl_valid(cmd, FS_IOC_GETLBMD_CAP, LBMD_SIZE_VER0))
		return -ENOIOCTLCMD;

	bi = blk_get_integrity(bdev->bd_disk);
	if (!bi)
		goto out;

	if (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_INTEGRITY;
	if (bi->flags & BLK_INTEGRITY_REF_TAG)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_REFTAG;
	meta_cap.lbmd_interval = 1 << bi->interval_exp;
	meta_cap.lbmd_size = bi->metadata_size;
	meta_cap.lbmd_pi_size = bi->pi_tuple_size;
	meta_cap.lbmd_pi_offset = bi->pi_offset;
	meta_cap.lbmd_opaque_size = bi->metadata_size - bi->pi_tuple_size;
	if (meta_cap.lbmd_opaque_size && !bi->pi_offset)
		meta_cap.lbmd_opaque_offset = bi->pi_tuple_size;

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_NONE;
		break;
	case BLK_INTEGRITY_CSUM_IP:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_IP;
		break;
	case BLK_INTEGRITY_CSUM_CRC:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC16_T10DIF;
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC64_NVME;
		break;
	}

	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
		meta_cap.lbmd_app_tag_size = 2;

	if (bi->flags & BLK_INTEGRITY_REF_TAG) {
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct crc64_pi_tuple, ref_tag);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct t10_pi_tuple, ref_tag);
			break;
		default:
			break;
		}
	}

out:
	return copy_struct_to_user(argp, usize, &meta_cap, sizeof(meta_cap),
				   NULL);
}

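/*
 * blk_rq_integrity_map_user - map a user buffer as integrity metadata for @rq
 *
 * Wraps the user buffer in an iov_iter and attaches it to the request's bio
 * as integrity metadata via bio_integrity_map_user(), then records the
 * number of integrity segments and marks the request with REQ_INTEGRITY.
 * Returns 0 on success or a negative errno from the mapping step.
 */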
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes)
{
	int ret;
	struct iov_iter iter;

	iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
	ret = bio_integrity_map_user(rq->bio, &iter);
	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);

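/*
 * blk_integrity_merge_rq - check whether two requests' integrity data can merge
 *
 * Two requests are mergeable when both carry no integrity payload, or when
 * both do and their payload flags (and, if app tag checking is enabled,
 * their app tags) match, the combined integrity segment count stays within
 * the queue limit, and no metadata gap would be introduced by the merge.
 */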
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	struct bio_integrity_payload *bip, *bip_next;

	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	bip = bio_integrity(req->bio);
	bip_next = bio_integrity(next->bio);
	if (bip->bip_flags != bip_next->bip_flags)
		return false;

	if (bip->bip_flags & BIP_CHECK_APPTAG &&
	    bip->app_tag != bip_next->app_tag)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

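/*
 * blk_integrity_merge_bio - check whether a bio's integrity payload can be
 * merged into an existing request.  Mirrors blk_integrity_merge_rq(): flags
 * and (where checked) app tags must match, and the combined integrity
 * segment count must stay within the queue's limit.
 */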
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
		return false;

	bip = bio_integrity(req->bio);
	if (bip->bip_flags != bip_bio->bip_flags)
		return false;

	if (bip->bip_flags & BIP_CHECK_APPTAG &&
	    bip->app_tag != bip_bio->app_tag)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}

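/* Map a sysfs device back to the owning disk's integrity queue limits. */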
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

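/*
 * blk_integrity_profile_name - return a human-readable name for the
 * integrity profile, derived from the checksum type and whether reference
 * tags are used (DIF Type 1) or not (Type 3).  "nop" is returned when no
 * checksum type is configured.
 */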
const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);

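/*
 * Sysfs helper: parse a 0/1 value and update one integrity flag through a
 * queue limits update.  The BLK_INTEGRITY_NOVERIFY/NOGENERATE flags have
 * inverted polarity relative to the read_verify/write_generate files, so
 * writing 1 clears the flag and writing 0 sets it.
 */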
static ssize_t flag_store(struct device *dev, const char *page, size_t count,
		unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;
	return count;
}

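/* Sysfs helper: show the inverted state of one integrity flag as 0 or 1. */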
static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->metadata_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

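/*
 * Attributes exported through the "integrity" sysfs group attached to the
 * disk, i.e. under /sys/block/<disk>/integrity/.
 */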
static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};