GitHub Repository: torvalds/linux
Path: blob/master/drivers/base/regmap/regcache-maple.c
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <[email protected]>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"
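
/*
 * Look up a single register. Each maple tree entry is an array of
 * values covering a contiguous register range starting at mas.index,
 * so the cached value for @reg sits at offset reg - mas.index.
 */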
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}
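
/*
 * Write a single register. If @reg already lies inside an existing
 * entry the value is updated in place. Otherwise a new entry is
 * created, merging with any entries that end at reg - 1 or start at
 * reg + 1 so that contiguous registers stay in one array; the old
 * arrays are freed once the merged entry has been stored.
 */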
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}
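
/*
 * Drop registers in [min, max] from the cache. An entry that straddles
 * the boundary is split: the parts below min and/or above max are
 * copied out, the whole entry is erased, and the saved fragments are
 * stored back as new, smaller entries.
 */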
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	/* initialized to work around false-positive -Wuninitialized warning */
	unsigned long lower_index = 0, lower_last = 0;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup_array(entry,
					      min - mas.index, sizeof(*lower),
					      map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup_array(&entry[max - mas.index + 1],
					      mas.last - max, sizeof(*upper),
					      map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}
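
/*
 * Write back the registers [min, max) from one cache entry to the
 * device. The maple state is paused and the RCU read lock dropped
 * around the bus access, which may sleep; a raw block write is used
 * where the bus supports it to cut per-register transaction overhead.
 */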
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}
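
/*
 * Sync the cache contents for [min, max] to the device. Each entry is
 * scanned with regcache_reg_needs_sync() and dirty registers are
 * batched into contiguous runs, each pushed out via
 * regcache_maple_sync_block(). Cache bypass is enabled so the writes
 * go straight to the hardware rather than back into the cache.
 */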
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}
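
/*
 * Tear down the cache: free every entry's value array, destroy the
 * tree's contents and then the tree structure itself. Safe to call
 * again afterwards since map->cache is cleared on the way out.
 */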
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}
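
/*
 * Seed one contiguous run of register defaults into the tree as a
 * single entry. @first and @last index into map->reg_defaults, whose
 * .reg fields supply the actual register range stored.
 */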
static int regcache_maple_insert_block(struct regmap *map, int first,
				       int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}
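
/*
 * Allocate and initialise the tree, then walk map->reg_defaults and
 * insert each run of consecutive register numbers as one block. On
 * any failure the partially built cache is torn down again.
 */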
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;
	int range_start;

	mt = kmalloc(sizeof(*mt), map->alloc_flags);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!mt_external_lock(mt) && map->lock_key)
		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

	if (!map->num_reg_defaults)
		return 0;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				goto err;

			range_start = i;
		}
	}

	/* Add the last block */
	ret = regcache_maple_insert_block(map, range_start,
					  map->num_reg_defaults - 1);
	if (ret != 0)
		goto err;

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}
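
/* Operations table registered with the regcache core for REGCACHE_MAPLE */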
struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};
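
/*
 * Usage sketch (illustrative, not part of the upstream file): a driver
 * opts into this cache by setting cache_type in its regmap_config; the
 * register widths and max_register below are hypothetical values.
 *
 *	static const struct regmap_config example_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 16,
 *		.max_register	= 0xff,
 *		.cache_type	= REGCACHE_MAPLE,
 *	};
 *
 * The regcache core then attaches regcache_maple_ops and calls its
 * .init to seed the maple tree from any declared reg_defaults.
 */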