GitHub Repository: torvalds/linux
Path: blob/master/block/blk-ia-ranges.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Block device concurrent positioning ranges.
 *
 * Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"

static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar,
                         char *buf)
{
        return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
                             char *buf)
{
        return sprintf(buf, "%llu\n", iar->nr_sectors);
}

struct blk_ia_range_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};

static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
        .attr = { .name = "sector", .mode = 0444 },
        .show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
        .attr = { .name = "nr_sectors", .mode = 0444 },
        .show = blk_ia_range_nr_sectors_show,
};

static struct attribute *blk_ia_range_attrs[] = {
        &blk_ia_range_sector_entry.attr,
        &blk_ia_range_nr_sectors_entry.attr,
        NULL,
};
ATTRIBUTE_GROUPS(blk_ia_range);

static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
{
        struct blk_ia_range_sysfs_entry *entry =
                container_of(attr, struct blk_ia_range_sysfs_entry, attr);
        struct blk_independent_access_range *iar =
                container_of(kobj, struct blk_independent_access_range, kobj);

        return entry->show(iar, buf);
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
        .show = blk_ia_range_sysfs_show,
};

/*
 * Independent access range entries are not freed individually, but all
 * together with struct blk_independent_access_ranges and its array of
 * ranges. Since kobject_add() takes a reference on the parent kobject
 * contained in struct blk_independent_access_ranges, the array of
 * independent access range entries cannot be freed until kobject_del() is
 * called for all entries. So we do not need to do anything here, but still
 * need this no-op release operation to avoid complaints from the kobject
 * code.
 */
static void blk_ia_range_sysfs_nop_release(struct kobject *kobj)
{
}

static const struct kobj_type blk_ia_range_ktype = {
        .sysfs_ops = &blk_ia_range_sysfs_ops,
        .default_groups = blk_ia_range_groups,
        .release = blk_ia_range_sysfs_nop_release,
};

/*
 * This will be executed only after all independent access range entries are
 * removed with kobject_del(), at which point, it is safe to free everything,
 * including the array of ranges.
 */
static void blk_ia_ranges_sysfs_release(struct kobject *kobj)
{
        struct blk_independent_access_ranges *iars =
                container_of(kobj, struct blk_independent_access_ranges, kobj);

        kfree(iars);
}

static const struct kobj_type blk_ia_ranges_ktype = {
        .release = blk_ia_ranges_sysfs_release,
};

/**
 * disk_register_independent_access_ranges - register with sysfs a set of
 *              independent access ranges
 * @disk:       Target disk
 *
 * Register with sysfs a set of independent access ranges for @disk.
 */
int disk_register_independent_access_ranges(struct gendisk *disk)
{
        struct blk_independent_access_ranges *iars = disk->ia_ranges;
        struct request_queue *q = disk->queue;
        int i, ret;

        lockdep_assert_held(&q->sysfs_lock);

        if (!iars)
                return 0;

        /*
         * At this point, iars is the new set of sector access ranges that
         * needs to be registered with sysfs.
         */
        WARN_ON(iars->sysfs_registered);
        ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
                                   &disk->queue_kobj, "%s",
                                   "independent_access_ranges");
        if (ret) {
                disk->ia_ranges = NULL;
                kobject_put(&iars->kobj);
                return ret;
        }

        for (i = 0; i < iars->nr_ia_ranges; i++) {
                ret = kobject_init_and_add(&iars->ia_range[i].kobj,
                                           &blk_ia_range_ktype, &iars->kobj,
                                           "%d", i);
                if (ret) {
                        while (--i >= 0)
                                kobject_del(&iars->ia_range[i].kobj);
                        kobject_del(&iars->kobj);
                        kobject_put(&iars->kobj);
                        return ret;
                }
        }

        iars->sysfs_registered = true;

        return 0;
}
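
/*
 * Editor's sketch (not part of the upstream file): given the kobject
 * hierarchy built above, a disk reporting two independent access ranges
 * would be expected to expose roughly the following read-only sysfs files
 * under its queue directory (the device name "sda" is only an example):
 *
 *   /sys/block/sda/queue/independent_access_ranges/0/sector
 *   /sys/block/sda/queue/independent_access_ranges/0/nr_sectors
 *   /sys/block/sda/queue/independent_access_ranges/1/sector
 *   /sys/block/sda/queue/independent_access_ranges/1/nr_sectors
 *
 * Each "sector" and "nr_sectors" file is mode 0444 and is served by
 * blk_ia_range_sector_show() / blk_ia_range_nr_sectors_show().
 */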

void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_independent_access_ranges *iars = disk->ia_ranges;
        int i;

        lockdep_assert_held(&q->sysfs_lock);

        if (!iars)
                return;

        if (iars->sysfs_registered) {
                for (i = 0; i < iars->nr_ia_ranges; i++)
                        kobject_del(&iars->ia_range[i].kobj);
                kobject_del(&iars->kobj);
                kobject_put(&iars->kobj);
        } else {
                kfree(iars);
        }

        disk->ia_ranges = NULL;
}

static struct blk_independent_access_range *
disk_find_ia_range(struct blk_independent_access_ranges *iars,
                   sector_t sector)
{
        struct blk_independent_access_range *iar;
        int i;

        for (i = 0; i < iars->nr_ia_ranges; i++) {
                iar = &iars->ia_range[i];
                if (sector >= iar->sector &&
                    sector < iar->sector + iar->nr_sectors)
                        return iar;
        }

        return NULL;
}

static bool disk_check_ia_ranges(struct gendisk *disk,
                                 struct blk_independent_access_ranges *iars)
{
        struct blk_independent_access_range *iar, *tmp;
        sector_t capacity = get_capacity(disk);
        sector_t sector = 0;
        int i;

        if (WARN_ON_ONCE(!iars->nr_ia_ranges))
                return false;

        /*
         * While sorting the ranges in increasing LBA order, check that the
         * ranges do not overlap, that there are no sector holes and that all
         * sectors belong to one range.
         */
        for (i = 0; i < iars->nr_ia_ranges; i++) {
                tmp = disk_find_ia_range(iars, sector);
                if (!tmp || tmp->sector != sector) {
                        pr_warn("Invalid non-contiguous independent access ranges\n");
                        return false;
                }

                iar = &iars->ia_range[i];
                if (tmp != iar) {
                        swap(iar->sector, tmp->sector);
                        swap(iar->nr_sectors, tmp->nr_sectors);
                }

                sector += iar->nr_sectors;
        }

        if (sector != capacity) {
                pr_warn("Independent access ranges do not match disk capacity\n");
                return false;
        }

        return true;
}
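
/*
 * Editor's note (not part of the upstream file): as an illustration of the
 * check above, a 1000-sector disk described by the ranges
 * {sector = 600, nr_sectors = 400} and {sector = 0, nr_sectors = 600} is
 * accepted (the loop also sorts the two entries into increasing LBA order),
 * while {0, 600} and {700, 300} is rejected as non-contiguous, and
 * {0, 600} and {600, 300} is rejected because the ranges cover only 900 of
 * the 1000 sectors.
 */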

static bool disk_ia_ranges_changed(struct gendisk *disk,
                                   struct blk_independent_access_ranges *new)
{
        struct blk_independent_access_ranges *old = disk->ia_ranges;
        int i;

        if (!old)
                return true;

        if (old->nr_ia_ranges != new->nr_ia_ranges)
                return true;

        for (i = 0; i < old->nr_ia_ranges; i++) {
                if (new->ia_range[i].sector != old->ia_range[i].sector ||
                    new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
                        return true;
        }

        return false;
}

/**
 * disk_alloc_independent_access_ranges - Allocate an independent access
 *              ranges data structure
 * @disk:       target disk
 * @nr_ia_ranges: Number of independent access ranges
 *
 * Allocate a struct blk_independent_access_ranges structure with
 * @nr_ia_ranges access range descriptors.
 */
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges)
{
        struct blk_independent_access_ranges *iars;

        iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges),
                            GFP_KERNEL, disk->queue->node);
        if (iars)
                iars->nr_ia_ranges = nr_ia_ranges;
        return iars;
}
EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges);

/**
 * disk_set_independent_access_ranges - Set a disk independent access ranges
 * @disk:       target disk
 * @iars:       independent access ranges structure
 *
 * Set the independent access ranges information of the request queue
 * of @disk to @iars. If @iars is NULL, any independent access ranges
 * structure already set is cleared. If there are no differences between
 * @iars and the independent access ranges structure already set, @iars
 * is freed.
 */
void disk_set_independent_access_ranges(struct gendisk *disk,
                                struct blk_independent_access_ranges *iars)
{
        struct request_queue *q = disk->queue;

        mutex_lock(&q->sysfs_lock);
        if (iars && !disk_check_ia_ranges(disk, iars)) {
                kfree(iars);
                iars = NULL;
        }
        if (iars && !disk_ia_ranges_changed(disk, iars)) {
                kfree(iars);
                goto unlock;
        }

        /*
         * This may be called for a registered queue, e.g. during a device
         * revalidation. If that is the case, we need to unregister the old
         * set of independent access ranges and register the new set. If the
         * queue is not registered, registration of the device request queue
         * will register the independent access ranges.
         */
        disk_unregister_independent_access_ranges(disk);
        disk->ia_ranges = iars;
        if (blk_queue_registered(q))
                disk_register_independent_access_ranges(disk);
unlock:
        mutex_unlock(&q->sysfs_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
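
/*
 * Editor's sketch (not part of the upstream file): a driver that has
 * discovered its device's concurrent positioning ranges (the SCSI disk
 * driver, for instance, reports such ranges for multi-actuator hard disks)
 * would typically use the two exported helpers above roughly as follows.
 * The function and variable names (my_drv_report_ranges, nr, start, len)
 * are hypothetical and only illustrate the calling convention:
 *
 *      static int my_drv_report_ranges(struct gendisk *disk, int nr,
 *                                      sector_t *start, sector_t *len)
 *      {
 *              struct blk_independent_access_ranges *iars;
 *              int i;
 *
 *              iars = disk_alloc_independent_access_ranges(disk, nr);
 *              if (!iars)
 *                      return -ENOMEM;
 *
 *              for (i = 0; i < nr; i++) {
 *                      iars->ia_range[i].sector = start[i];
 *                      iars->ia_range[i].nr_sectors = len[i];
 *              }
 *
 *              // Ownership of iars passes to the block layer, which
 *              // validates the ranges and frees them if they are invalid
 *              // or unchanged.
 *              disk_set_independent_access_ranges(disk, iars);
 *              return 0;
 *      }
 */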