Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/block/bio-integrity-auto.c
50038 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (C) 2007, 2008, 2009 Oracle Corporation
4
* Written by: Martin K. Petersen <[email protected]>
5
*
6
* Automatically generate and verify integrity data on PI capable devices if the
7
* bio submitter didn't provide PI itself. This ensures that kernel verifies
8
* data integrity even if the file system (or other user of the block device) is
9
* not aware of PI.
10
*/
11
#include <linux/blk-integrity.h>
12
#include <linux/t10-pi.h>
13
#include <linux/workqueue.h>
14
#include "blk.h"
15
16
/*
 * Per-bio state for auto-generated integrity metadata.  Allocated from
 * bid_pool in bio_integrity_prep() and released by bio_integrity_finish().
 */
struct bio_integrity_data {
	struct bio *bio;			/* the protected bio */
	struct bvec_iter saved_bio_iter;	/* iter snapshot used to verify reads */
	struct work_struct work;		/* deferred read-verification work */
	struct bio_integrity_payload bip;	/* embedded integrity payload */
	struct bio_vec bvec;			/* single inline vec backing bip */
};

/* Slab cache and mempool backing struct bio_integrity_data allocations. */
static struct kmem_cache *bid_slab;
static mempool_t bid_pool;
/* Workqueue that runs deferred integrity verification for reads. */
static struct workqueue_struct *kintegrityd_wq;
27
28
/*
 * Tear down the auto-generated integrity state attached to a bio: detach
 * the payload from the bio, free the metadata buffer, and return the
 * bio_integrity_data to its mempool.
 */
static void bio_integrity_finish(struct bio_integrity_data *bid)
{
	bid->bio->bi_integrity = NULL;
	bid->bio->bi_opf &= ~REQ_INTEGRITY;
	bio_integrity_free_buf(&bid->bip);
	mempool_free(bid, &bid_pool);	/* bid is invalid from here on */
}
35
36
/*
 * Work item: verify read integrity metadata in process context, then
 * complete the bio.  The bio pointer is saved up front because
 * bio_integrity_finish() frees bid.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_data *bid =
		container_of(work, struct bio_integrity_data, work);
	struct bio *bio = bid->bio;

	blk_integrity_verify_iter(bio, &bid->saved_bio_iter);
	bio_integrity_finish(bid);
	bio_endio(bio);
}
46
47
#define BIP_CHECK_FLAGS (BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
48
static bool bip_should_check(struct bio_integrity_payload *bip)
49
{
50
return bip->bip_flags & BIP_CHECK_FLAGS;
51
}
52
53
static bool bi_offload_capable(struct blk_integrity *bi)
54
{
55
return bi->metadata_size == bi->pi_tuple_size;
56
}
57
58
/**
59
* __bio_integrity_endio - Integrity I/O completion function
60
* @bio: Protected bio
61
*
62
* Normally I/O completion is done in interrupt context. However, verifying I/O
63
* integrity is a time-consuming task which must be run in process context.
64
*
65
* This function postpones completion accordingly.
66
*/
67
bool __bio_integrity_endio(struct bio *bio)
68
{
69
struct bio_integrity_payload *bip = bio_integrity(bio);
70
struct bio_integrity_data *bid =
71
container_of(bip, struct bio_integrity_data, bip);
72
73
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
74
bip_should_check(bip)) {
75
INIT_WORK(&bid->work, bio_integrity_verify_fn);
76
queue_work(kintegrityd_wq, &bid->work);
77
return false;
78
}
79
80
bio_integrity_finish(bid);
81
return true;
82
}
83
84
/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio: bio to prepare
 *
 * Checks if the bio already has an integrity payload attached. If it does, the
 * payload has been generated by another kernel subsystem, and we just pass it
 * through.
 * Otherwise allocates integrity payload and for writes the integrity metadata
 * will be generated. For reads, the completion handler will verify the
 * metadata.
 *
 * Return: always %true; the bio proceeds with or without a payload attached.
 */
bool bio_integrity_prep(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_data *bid;
	bool set_flags = true;
	gfp_t gfp = GFP_NOIO;

	/* Device has no integrity profile - nothing to do. */
	if (!bi)
		return true;

	/* No data, no metadata. */
	if (!bio_sectors(bio))
		return true;

	/* Already protected? */
	if (bio_integrity(bio))
		return true;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
			/*
			 * Verification disabled: skip the payload entirely if
			 * the device can carry bare PI itself, otherwise still
			 * allocate a buffer but arm no check flags.
			 */
			if (bi_offload_capable(bi))
				return true;
			set_flags = false;
		}
		break;
	case REQ_OP_WRITE:
		/*
		 * Zero the memory allocated to not leak uninitialized kernel
		 * memory to disk for non-integrity metadata where nothing else
		 * initializes the memory.
		 */
		if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
			if (bi_offload_capable(bi))
				return true;
			set_flags = false;
			gfp |= __GFP_ZERO;
		} else if (bi->metadata_size > bi->pi_tuple_size)
			gfp |= __GFP_ZERO;
		break;
	default:
		/* Other ops (discard, flush, ...) carry no integrity data. */
		return true;
	}

	/* Inline encryption and auto-PI are mutually exclusive. */
	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return true;

	/* mempool_alloc with GFP_NOIO may block but does not fail. */
	bid = mempool_alloc(&bid_pool, GFP_NOIO);
	bio_integrity_init(bio, &bid->bip, &bid->bvec, 1);
	bid->bio = bio;

	bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY;
	bio_integrity_alloc_buf(bio, gfp & __GFP_ZERO);

	/* Seed the reference tag from the starting sector. */
	bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);

	if (set_flags) {
		if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
			bid->bip.bip_flags |= BIP_IP_CHECKSUM;
		if (bi->csum_type)
			bid->bip.bip_flags |= BIP_CHECK_GUARD;
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			bid->bip.bip_flags |= BIP_CHECK_REFTAG;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))
		blk_integrity_generate(bio);
	else
		/* Reads: snapshot the iter so the completion path can verify. */
		bid->saved_bio_iter = bio->bi_iter;
	return true;
}
EXPORT_SYMBOL(bio_integrity_prep);
166
167
/* Wait for all pending deferred integrity verification work to finish. */
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}
171
172
/*
 * Boot-time setup of the slab, mempool and workqueue used by the
 * auto-integrity path.  All failures panic: the block layer cannot
 * operate on PI devices without these resources.
 */
static int __init blk_integrity_auto_init(void)
{
	/* SLAB_PANIC makes cache creation failure fatal, so no NULL check. */
	bid_slab = kmem_cache_create("bio_integrity_data",
			sizeof(struct bio_integrity_data), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	if (mempool_init_slab_pool(&bid_pool, BIO_POOL_SIZE, bid_slab))
		panic("bio: can't create integrity pool\n");

	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
			WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");
	return 0;
}
subsys_initcall(blk_integrity_auto_init);
192
193