GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_xarray.c
/*-
 * Copyright (c) 2020 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/xarray.h>

#include <vm/vm_pageout.h>

/*
 * Linux's XArray allows a NULL pointer to be stored as a value: xa_load()
 * returns NULL both for an unused index and for an index explicitly set to
 * NULL. This matters for xa_alloc(), which needs to find the next available
 * index.
 *
 * However, our implementation relies on a radix tree (see `linux_radix.c`)
 * which does not accept NULL pointers as values. Whether this is a
 * limitation or a feature is unclear; to work around it, a NULL value is
 * replaced by `NULL_VALUE`, an unlikely address, before it is passed to
 * linux_radix.
 */
#define NULL_VALUE (void *)0x1

/*
 * This function removes the element at the given index and returns
 * the pointer to the removed element, if any.
 */
void *
__xa_erase(struct xarray *xa, uint32_t index)
{
        void *retval;

        XA_ASSERT_LOCKED(xa);

        retval = radix_tree_delete(&xa->xa_head, index);
        if (retval == NULL_VALUE)
                retval = NULL;

        return (retval);
}

void *
xa_erase(struct xarray *xa, uint32_t index)
{
        void *retval;

        xa_lock(xa);
        retval = __xa_erase(xa, index);
        xa_unlock(xa);

        return (retval);
}

/*
 * This function returns the element pointer at the given index. A
 * value of NULL is returned if the element does not exist.
 */
void *
xa_load(struct xarray *xa, uint32_t index)
{
        void *retval;

        xa_lock(xa);
        retval = radix_tree_lookup(&xa->xa_head, index);
        xa_unlock(xa);

        if (retval == NULL_VALUE)
                retval = NULL;

        return (retval);
}
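
/*
 * Usage sketch (illustrative only, hence kept under #if 0): a NULL value can
 * be stored explicitly; xa_load() then returns NULL just as it would for an
 * unused index, but the slot itself stays occupied. GFP_KERNEL is assumed to
 * be available as a sleepable gfp_t value, as in the Linux API.
 */
#if 0
static void
xa_load_example(struct xarray *xa)
{
        /* Store a NULL value at index 10; internally it becomes NULL_VALUE. */
        xa_store(xa, 10, NULL, GFP_KERNEL);

        /* Both calls return NULL: index 10 holds NULL, index 11 is unused. */
        MPASS(xa_load(xa, 10) == NULL);
        MPASS(xa_load(xa, 11) == NULL);

        /* The slot is nonetheless taken; inserting again reports -EEXIST. */
        MPASS(xa_insert(xa, 10, NULL, GFP_KERNEL) == -EEXIST);

        xa_erase(xa, 10);
}
#endif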

/*
 * This is an internal function used to sleep until more memory
 * becomes available.
 */
static void
xa_vm_wait_locked(struct xarray *xa)
{
        xa_unlock(xa);
        vm_wait(NULL);
        xa_lock(xa);
}

/*
 * This function iterates the xarray until it finds a free slot where
 * it can insert the element pointed to by "ptr". It starts at the
 * index pointed to by "pindex" and updates that value upon return.
 * The "mask" argument defines the maximum index allowed, inclusively,
 * and must be a power of two minus one. The "gfp" argument tells
 * whether we can wait for more memory to become available or not.
 * This function returns zero upon success or a negative error code
 * upon failure. A typical error code is -ENOMEM, which means either
 * that the xarray is full or that there was not enough internal
 * memory available to complete the radix tree insertion.
 */
int
__xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
{
        int retval;

        XA_ASSERT_LOCKED(xa);

        /* mask should allow allocating at least one item */
        MPASS(mask > ((xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));

        /* mask can be any power of two value minus one */
        MPASS((mask & (mask + 1)) == 0);

        *pindex = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
        if (ptr == NULL)
                ptr = NULL_VALUE;
retry:
        retval = radix_tree_insert(&xa->xa_head, *pindex, ptr);

        switch (retval) {
        case -EEXIST:
                if (likely(*pindex != mask)) {
                        (*pindex)++;
                        goto retry;
                }
                retval = -ENOMEM;
                break;
        case -ENOMEM:
                if (likely(gfp & M_WAITOK)) {
                        xa_vm_wait_locked(xa);
                        goto retry;
                }
                break;
        default:
                break;
        }
        return (retval);
}

int
xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
{
        int retval;

        if (ptr == NULL)
                ptr = NULL_VALUE;

        xa_lock(xa);
        retval = __xa_alloc(xa, pindex, ptr, mask, gfp);
        xa_unlock(xa);

        return (retval);
}
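
/*
 * Usage sketch (illustrative only, hence kept under #if 0): allocate a free
 * index for "ptr" anywhere in the range allowed by the mask. The mask must be
 * a power of two minus one, e.g. 0xff allows indices 0..255 (or 1..255 with
 * XA_FLAGS_ALLOC1). GFP_KERNEL is assumed to be a sleepable gfp_t value.
 */
#if 0
static int
xa_alloc_example(struct xarray *xa, void *ptr)
{
        uint32_t id;
        int error;

        error = xa_alloc(xa, &id, ptr, 0xff, GFP_KERNEL);
        if (error != 0)
                return (error);        /* -ENOMEM: array full or no memory */

        /* "id" now refers to "ptr" until xa_erase(xa, id) is called. */
        return (0);
}
#endif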

/*
 * This function works the same as the "xa_alloc" function, except
 * that when no free slot is found towards the end of the xarray, the
 * next index value wraps around to zero and the search for a free
 * slot continues from the beginning of the array. If the xarray is
 * full, -ENOMEM is returned.
 */
int
__xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
    uint32_t *pnext_index, gfp_t gfp)
{
        int retval;
        int timeout = 1;

        XA_ASSERT_LOCKED(xa);

        /* mask should allow allocating at least one item */
        MPASS(mask > ((xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));

        /* mask can be any power of two value minus one */
        MPASS((mask & (mask + 1)) == 0);

        *pnext_index = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
        if (ptr == NULL)
                ptr = NULL_VALUE;
retry:
        retval = radix_tree_insert(&xa->xa_head, *pnext_index, ptr);

        switch (retval) {
        case -EEXIST:
                if (unlikely(*pnext_index == mask) && !timeout--) {
                        retval = -ENOMEM;
                        break;
                }
                (*pnext_index)++;
                (*pnext_index) &= mask;
                if (*pnext_index == 0 && (xa->xa_flags & XA_FLAGS_ALLOC1) != 0)
                        (*pnext_index)++;
                goto retry;
        case -ENOMEM:
                if (likely(gfp & M_WAITOK)) {
                        xa_vm_wait_locked(xa);
                        goto retry;
                }
                break;
        default:
                break;
        }
        *pindex = *pnext_index;

        return (retval);
}

int
xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
    uint32_t *pnext_index, gfp_t gfp)
{
        int retval;

        xa_lock(xa);
        retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp);
        xa_unlock(xa);

        return (retval);
}
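
/*
 * Usage sketch (illustrative only, hence kept under #if 0): xa_alloc_cyclic()
 * takes an extra "pnext_index" pointer; on success it holds the same index
 * that was written to "pid". GFP_KERNEL is assumed to be a sleepable gfp_t
 * value.
 */
#if 0
static int
xa_alloc_cyclic_example(struct xarray *xa, void *ptr, uint32_t *pid)
{
        uint32_t next_index;

        /* Allocate a free index in 0..0xffff, wrapping around if needed. */
        return (xa_alloc_cyclic(xa, pid, ptr, 0xffff, &next_index, GFP_KERNEL));
}
#endif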

int
xa_alloc_cyclic_irq(struct xarray *xa, uint32_t *pindex, void *ptr,
    uint32_t mask, uint32_t *pnext_index, gfp_t gfp)
{
        int retval;

        xa_lock_irq(xa);
        retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp);
        xa_unlock_irq(xa);

        return (retval);
}

/*
 * This function tries to insert an element at the given index. The
 * "gfp" argument decides whether this function may sleep while trying
 * to allocate internal memory for its radix tree. The function
 * returns an error code upon failure. Typical error codes are element
 * exists (-EEXIST) or out of memory (-ENOMEM).
 */
int
__xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
        int retval;

        XA_ASSERT_LOCKED(xa);
        if (ptr == NULL)
                ptr = NULL_VALUE;
retry:
        retval = radix_tree_insert(&xa->xa_head, index, ptr);

        switch (retval) {
        case -ENOMEM:
                if (likely(gfp & M_WAITOK)) {
                        xa_vm_wait_locked(xa);
                        goto retry;
                }
                break;
        default:
                break;
        }
        return (retval);
}

int
xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
        int retval;

        xa_lock(xa);
        retval = __xa_insert(xa, index, ptr, gfp);
        xa_unlock(xa);

        return (retval);
}
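
/*
 * Usage sketch (illustrative only, hence kept under #if 0): unlike xa_store(),
 * xa_insert() refuses to overwrite an occupied slot and reports -EEXIST
 * instead. GFP_KERNEL is assumed to be a sleepable gfp_t value.
 */
#if 0
static int
xa_insert_example(struct xarray *xa, uint32_t index, void *ptr)
{
        int error;

        error = xa_insert(xa, index, ptr, GFP_KERNEL);
        switch (error) {
        case 0:
                /* "ptr" is now stored at "index". */
                break;
        case -EEXIST:
                /* Some other element already lives at "index". */
                break;
        default:
                /* -ENOMEM: internal allocation failed. */
                break;
        }
        return (error);
}
#endif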

/*
 * This function updates the element at the given index and returns a
 * pointer to the old element. The "gfp" argument decides whether this
 * function may sleep while trying to allocate internal memory for its
 * radix tree. The function returns an XA_ERROR() pointer code upon
 * failure. Code using this function must always check whether the
 * return value is an XA_ERROR() code before using the returned value.
 */
void *
__xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
        int retval;

        XA_ASSERT_LOCKED(xa);
        if (ptr == NULL)
                ptr = NULL_VALUE;
retry:
        retval = radix_tree_store(&xa->xa_head, index, &ptr);

        switch (retval) {
        case 0:
                if (ptr == NULL_VALUE)
                        ptr = NULL;
                break;
        case -ENOMEM:
                if (likely(gfp & M_WAITOK)) {
                        xa_vm_wait_locked(xa);
                        goto retry;
                }
                ptr = XA_ERROR(retval);
                break;
        default:
                ptr = XA_ERROR(retval);
                break;
        }
        return (ptr);
}

void *
xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
        void *retval;

        xa_lock(xa);
        retval = __xa_store(xa, index, ptr, gfp);
        xa_unlock(xa);

        return (retval);
}
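
/*
 * Usage sketch (illustrative only, hence kept under #if 0): the return value
 * of xa_store() must be checked with the XA_ERROR() convention before it is
 * used. This assumes xa_is_err() and xa_err() are provided by
 * <linux/xarray.h>, as in the Linux API, and that GFP_KERNEL is a sleepable
 * gfp_t value.
 */
#if 0
static int
xa_store_example(struct xarray *xa, uint32_t index, void *new_ptr)
{
        void *old;

        old = xa_store(xa, index, new_ptr, GFP_KERNEL);
        if (xa_is_err(old))
                return (xa_err(old));        /* e.g. -ENOMEM */

        /* "old" is the previous element, or NULL if the index was unused. */
        return (0);
}
#endif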

/*
 * This function initializes an xarray structure.
 */
void
xa_init_flags(struct xarray *xa, uint32_t flags)
{
        memset(xa, 0, sizeof(*xa));

        mtx_init(&xa->xa_lock, "lkpi-xarray", NULL, MTX_DEF | MTX_RECURSE);
        xa->xa_head.gfp_mask = GFP_NOWAIT;
        xa->xa_flags = flags;
}

/*
 * This function destroys an xarray structure, releasing all of its
 * internal memory. The lock is intentionally left intact (see the
 * comment below).
 */
void
xa_destroy(struct xarray *xa)
{
        struct radix_tree_iter iter;
        void **ppslot;

        xa_lock(xa);
        radix_tree_for_each_slot(ppslot, &xa->xa_head, &iter, 0)
                radix_tree_iter_delete(&xa->xa_head, &iter, ppslot);
        xa_unlock(xa);

        /*
         * The mutex initialized in `xa_init_flags()` is not destroyed here on
         * purpose. The reason is that on Linux, the xarray remains usable
         * after a call to `xa_destroy()`. For instance, the i915 DRM driver
         * relies on that during the initialization of its GuC. Basically,
         * `xa_destroy()` "resets" the structure to zero but does not really
         * destroy it.
         */
}
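
/*
 * Usage sketch (illustrative only, hence kept under #if 0): because the lock
 * survives xa_destroy(), the xarray can be emptied and then used again
 * without re-initialization. GFP_KERNEL is assumed to be a sleepable gfp_t
 * value.
 */
#if 0
static void
xa_destroy_example(struct xarray *xa)
{
        xa_destroy(xa);
        MPASS(xa_empty(xa));

        /* The structure is still usable afterwards. */
        xa_store(xa, 0, (void *)xa, GFP_KERNEL);
        xa_erase(xa, 0);
}
#endif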

/*
 * This function checks whether an xarray is empty or not.
 * It returns true if empty, else false.
 */
bool
__xa_empty(struct xarray *xa)
{
        struct radix_tree_iter iter = {};
        void **temp;

        XA_ASSERT_LOCKED(xa);

        return (!radix_tree_iter_find(&xa->xa_head, &iter, &temp));
}

bool
xa_empty(struct xarray *xa)
{
        bool retval;

        xa_lock(xa);
        retval = __xa_empty(xa);
        xa_unlock(xa);

        return (retval);
}

/*
 * This function returns the next valid xarray entry based on the
 * index given by "pindex". The value pointed to by "pindex" is
 * updated before return.
 */
void *
__xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
{
        struct radix_tree_iter iter = { .index = *pindex };
        void **ppslot;
        void *retval;
        bool found;

        XA_ASSERT_LOCKED(xa);

        if (not_first) {
                /* advance to next index, if any */
                iter.index++;
                if (iter.index == 0)
                        return (NULL);
        }

        found = radix_tree_iter_find(&xa->xa_head, &iter, &ppslot);
        if (likely(found)) {
                retval = *ppslot;
                if (retval == NULL_VALUE)
                        retval = NULL;
                *pindex = iter.index;
        } else {
                retval = NULL;
        }
        return (retval);
}

void *
xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
{
        void *retval;

        xa_lock(xa);
        retval = __xa_next(xa, pindex, not_first);
        xa_unlock(xa);

        return (retval);
}
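
/*
 * Usage sketch (illustrative only, hence kept under #if 0): iterating over all
 * entries with xa_next(). The "not_first" argument is false for the first
 * lookup and true afterwards, so the iterator advances past the previously
 * returned index. Note that an entry explicitly stored as NULL would also end
 * this loop; see the NULL_VALUE note near the top of the file.
 */
#if 0
static unsigned long
xa_next_example(struct xarray *xa)
{
        unsigned long index = 0;
        unsigned long count = 0;
        void *entry;

        for (entry = xa_next(xa, &index, false); entry != NULL;
            entry = xa_next(xa, &index, true))
                count++;

        return (count);
}
#endif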