GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/lib/libuutil/uu_avl.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/avl.h>

static uu_avl_pool_t	uu_null_apool = { &uu_null_apool, &uu_null_apool };
static pthread_mutex_t	uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The index marks change on every insert and delete, to catch stale
 * references.
 *
 * We leave the low bit alone, since the avl code uses it.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 2)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)

#define	INDEX_DECODE(i)		((i) & ~INDEX_MAX)
#define	INDEX_ENCODE(p, n)	(((n) & ~INDEX_MAX) | (p)->ua_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ua_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
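
/*
 * For illustration (assuming an LP64 target, where sizeof (uintptr_t) is
 * 8): INDEX_MAX is 6 (binary 110), so ua_index cycles 2 -> 4 -> 6 -> 2.
 * INDEX_ENCODE() packs the tree's current mark into bits 1-2 of an avl
 * index, and INDEX_VALID() rejects any index handed out before the last
 * insert or delete (the mark only advances in debug mode):
 *
 *	uu_avl_index_t idx;
 *	(void) uu_avl_find(ap, elem, NULL, &idx);	// idx carries ua_index
 *	// After any insert or remove in debug mode, INDEX_VALID(ap, idx)
 *	// is false, and uu_avl_insert(ap, elem, idx) would panic.
 */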

/*
 * When an element is inactive (not in a tree), we keep a marked pointer to
 * its containing pool in its first word, and a NULL pointer in its second.
 *
 * On insert, we use these to verify that it comes from the correct pool.
 */
#define	NODE_ARRAY(p, n)	((uintptr_t *)((uintptr_t)(n) + \
    (p)->uap_nodeoffset))

#define	POOL_TO_MARKER(pp)	(((uintptr_t)(pp) | 1))

#define	DEAD_MARKER		0xc4
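
/*
 * For illustration: with a pool at a (hypothetical) address 0x1000, an
 * inactive node's two words read { 0x1001, 0 }.  POOL_TO_MARKER() sets the
 * low bit so the marker cannot be confused with an aligned pointer, and
 * uu_avl_node_fini() overwrites all three node words with DEAD_MARKER.
 */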

uu_avl_pool_t *
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
    uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_avl_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_avl_node_t) > objsize ||
	    compare_func == NULL) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_AVL_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_avl_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
	pp->uap_nodeoffset = nodeoffset;
	pp->uap_objsize = objsize;
	pp->uap_cmp = compare_func;
	if (flags & UU_AVL_POOL_DEBUG)
		pp->uap_debug = 1;
	pp->uap_last_index = 0;

	(void) pthread_mutex_init(&pp->uap_lock, NULL);

	pp->uap_null_avl.ua_next = &pp->uap_null_avl;
	pp->uap_null_avl.ua_prev = &pp->uap_null_avl;

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next = next = &uu_null_apool;
	pp->uap_prev = prev = next->uap_prev;
	next->uap_prev = pp;
	prev->uap_next = pp;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);

	return (pp);
}
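
/*
 * Usage sketch (not part of the library; "struct ent", ent_cmp() and the
 * pool name are hypothetical): callers embed a uu_avl_node_t in their own
 * objects and describe the layout to uu_avl_pool_create():
 *
 *	struct ent {
 *		int e_key;
 *		uu_avl_node_t e_node;
 *	};
 *
 *	static int
 *	ent_cmp(const void *l, const void *r, void *arg)
 *	{
 *		const struct ent *le = l, *re = r;
 *
 *		return (le->e_key < re->e_key ? -1 : le->e_key > re->e_key);
 *	}
 *
 *	uu_avl_pool_t *pool = uu_avl_pool_create("ent_pool",
 *	    sizeof (struct ent), offsetof(struct ent, e_node), ent_cmp,
 *	    UU_DEFAULT);
 */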

void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
	if (pp->uap_debug) {
		if (pp->uap_null_avl.ua_next != &pp->uap_null_avl ||
		    pp->uap_null_avl.ua_prev != &pp->uap_null_avl) {
			uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding avls, or is corrupt.\n",
			    (int)sizeof (pp->uap_name), pp->uap_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next->uap_prev = pp->uap_prev;
	pp->uap_prev->uap_next = pp->uap_next;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
	(void) pthread_mutex_destroy(&pp->uap_lock);
	pp->uap_prev = NULL;
	pp->uap_next = NULL;
	uu_free(pp);
}

void
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->uap_objsize) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_objsize);
		}
		if (offset != pp->uap_nodeoffset) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_nodeoffset);
		}
	}

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}

void
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node already fini'd\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
		if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt, in tree, or in different pool\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
	}

	na[0] = DEAD_MARKER;
	na[1] = DEAD_MARKER;
	na[2] = DEAD_MARKER;
}
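
/*
 * Lifecycle sketch (hypothetical "struct ent" from the sketch above):
 * every element's node is initialized before it may be inserted, and
 * finalized before the memory is released:
 *
 *	struct ent *e = uu_zalloc(sizeof (*e));
 *	uu_avl_node_init(e, &e->e_node, pool);
 *	... insert into and remove from a uu_avl_t ...
 *	uu_avl_node_fini(e, &e->e_node, pool);
 *	uu_free(e);
 */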

struct uu_avl_node_compare_info {
	uu_compare_fn_t	*ac_compare;
	void		*ac_private;
	void		*ac_right;
	void		*ac_found;
};
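
/*
 * Adapter passed to avl_find(): the "left" argument is really a
 * uu_avl_node_compare_info, so every tree node r is compared against
 * ac_right.  This never returns 0; the first match seen is remembered in
 * ac_found, and the search is steered left on a match, so avl_find()
 * always terminates with an insertion point rather than a "hit".
 */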
static int
uu_avl_node_compare(const void *l, const void *r)
{
	struct uu_avl_node_compare_info *info =
	    (struct uu_avl_node_compare_info *)l;

	int res = info->ac_compare(r, info->ac_right, info->ac_private);

	if (res == 0) {
		if (info->ac_found == NULL)
			info->ac_found = (void *)r;
		return (-1);
	}
	if (res < 0)
		return (1);
	return (-1);
}

uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
	uu_avl_t *ap, *next, *prev;

	if (flags & ~UU_AVL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	ap = uu_zalloc(sizeof (*ap));
	if (ap == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	ap->ua_pool = pp;
	ap->ua_parent = parent;
	ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
	ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

	avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
	    pp->uap_nodeoffset);

	ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
	ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;

	(void) pthread_mutex_lock(&pp->uap_lock);
	next = &pp->uap_null_avl;
	prev = next->ua_prev;
	ap->ua_next = next;
	ap->ua_prev = prev;
	next->ua_prev = ap;
	prev->ua_next = ap;
	(void) pthread_mutex_unlock(&pp->uap_lock);

	return (ap);
}
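
/*
 * Continuing the sketch: a tree is created from the pool and destroyed
 * once it is empty ("pool" is the hypothetical handle created above):
 *
 *	uu_avl_t *avl = uu_avl_create(pool, NULL, UU_DEFAULT);
 *	...
 *	uu_avl_destroy(avl);
 *	uu_avl_pool_destroy(pool);
 */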

void
uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pp = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n",
			    (void *)ap);
		}
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    (void *)ap);
		}
	}
	(void) pthread_mutex_lock(&pp->uap_lock);
	ap->ua_next->ua_prev = ap->ua_prev;
	ap->ua_prev->ua_next = ap->ua_next;
	(void) pthread_mutex_unlock(&pp->uap_lock);
	ap->ua_prev = NULL;
	ap->ua_next = NULL;

	ap->ua_pool = NULL;
	avl_destroy(&ap->ua_tree);

	uu_free(ap);
}

size_t
uu_avl_numnodes(uu_avl_t *ap)
{
	return (avl_numnodes(&ap->ua_tree));
}

void *
uu_avl_first(uu_avl_t *ap)
{
	return (avl_first(&ap->ua_tree));
}

void *
uu_avl_last(uu_avl_t *ap)
{
	return (avl_last(&ap->ua_tree));
}

void *
uu_avl_next(uu_avl_t *ap, void *node)
{
	return (AVL_NEXT(&ap->ua_tree, node));
}

void *
uu_avl_prev(uu_avl_t *ap, void *node)
{
	return (AVL_PREV(&ap->ua_tree, node));
}
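
/*
 * Plain iteration sketch: fine as long as no elements are inserted or
 * removed mid-walk (use a robust walker for that; see below):
 *
 *	struct ent *e;
 *
 *	for (e = uu_avl_first(avl); e != NULL; e = uu_avl_next(avl, e))
 *		... use e ...
 */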

static void
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->uaw_avl = ap;
	wp->uaw_robust = robust;
	wp->uaw_dir = direction;

	if (direction > 0)
		wp->uaw_next_result = avl_first(&ap->ua_tree);
	else
		wp->uaw_next_result = avl_last(&ap->ua_tree);

	if (ap->ua_debug || robust) {
		wp->uaw_next = next = &ap->ua_null_walk;
		wp->uaw_prev = prev = next->uaw_prev;
		next->uaw_prev = wp;
		prev->uaw_next = wp;
	}
}

static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *np = wp->uaw_next_result;

	avl_tree_t *t = &ap->ua_tree;

	if (np == NULL)
		return (NULL);

	wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
	    AVL_PREV(t, np);

	return (np);
}

static void
_avl_walk_fini(uu_avl_walk_t *wp)
{
	if (wp->uaw_next != NULL) {
		wp->uaw_next->uaw_prev = wp->uaw_prev;
		wp->uaw_prev->uaw_next = wp->uaw_next;
		wp->uaw_next = NULL;
		wp->uaw_prev = NULL;
	}
	wp->uaw_avl = NULL;
	wp->uaw_next_result = NULL;
}

uu_avl_walk_t *
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	_avl_walk_init(wp, ap, flags);
	return (wp);
}

void *
uu_avl_walk_next(uu_avl_walk_t *wp)
{
	return (_avl_walk_advance(wp, wp->uaw_avl));
}

void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
	_avl_walk_fini(wp);
	uu_free(wp);
}
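
/*
 * Explicit-walker sketch (error handling elided; is_stale() is a
 * hypothetical predicate).  UU_WALK_ROBUST keeps the walk valid even when
 * the element it is parked on is removed:
 *
 *	uu_avl_walk_t *wp = uu_avl_walk_start(avl, UU_WALK_ROBUST);
 *	struct ent *e;
 *
 *	while ((e = uu_avl_walk_next(wp)) != NULL) {
 *		if (is_stale(e))
 *			uu_avl_remove(avl, e);
 *	}
 *	uu_avl_walk_end(wp);
 */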

int
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	void *e;
	uu_avl_walk_t my_walk;

	int status = UU_WALK_NEXT;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	_avl_walk_init(&my_walk, ap, flags);
	while (status == UU_WALK_NEXT &&
	    (e = _avl_walk_advance(&my_walk, ap)) != NULL)
		status = (*func)(e, private);
	_avl_walk_fini(&my_walk);

	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
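
/*
 * Callback-walk sketch (print_ent() is hypothetical): the callback returns
 * UU_WALK_NEXT to continue, UU_WALK_DONE to stop early, or a negative
 * value to abort the walk with UU_ERROR_CALLBACK_FAILED:
 *
 *	static int
 *	print_ent(void *elem, void *arg)
 *	{
 *		struct ent *e = elem;
 *
 *		(void) printf("%d\n", e->e_key);
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	(void) uu_avl_walk(avl, print_ent, NULL, UU_DEFAULT);
 */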

void
uu_avl_remove(uu_avl_t *ap, void *elem)
{
	uu_avl_walk_t *wp;
	uu_avl_pool_t *pp = ap->ua_pool;
	uintptr_t *na = NODE_ARRAY(pp, elem);

	if (ap->ua_debug) {
		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}

	/*
	 * Robust walkers must be advanced if we are removing the node
	 * they are currently using.  In debug mode, non-robust walkers
	 * are also on the walker list.
	 */
	for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
	    wp = wp->uaw_next) {
		if (wp->uaw_robust) {
			if (elem == wp->uaw_next_result)
				(void) _avl_walk_advance(wp, ap);
		} else if (wp->uaw_next_result != NULL) {
			uu_panic("uu_avl_remove(%p, %p): active non-robust "
			    "walker\n", (void *)ap, elem);
		}
	}

	avl_remove(&ap->ua_tree, elem);

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}

void *
uu_avl_teardown(uu_avl_t *ap, void **cookie)
{
	void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);

	if (elem != NULL) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		na[0] = POOL_TO_MARKER(pp);
		na[1] = 0;
	}
	return (elem);
}
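
/*
 * Teardown sketch: avl_destroy_nodes() hands each element back exactly
 * once without rebalancing, so emptying a large tree this way is much
 * cheaper than repeated uu_avl_remove() calls:
 *
 *	void *cookie = NULL;
 *	struct ent *e;
 *
 *	while ((e = uu_avl_teardown(avl, &cookie)) != NULL) {
 *		uu_avl_node_fini(e, &e->e_node, pool);
 *		uu_free(e);
 *	}
 *	uu_avl_destroy(avl);
 */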

void *
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
{
	struct uu_avl_node_compare_info info;
	void *result;

	info.ac_compare = ap->ua_pool->uap_cmp;
	info.ac_private = private;
	info.ac_right = elem;
	info.ac_found = NULL;

	result = avl_find(&ap->ua_tree, &info, out);
	if (out != NULL)
		*out = INDEX_ENCODE(ap, *out);

	if (ap->ua_debug && result != NULL)
		uu_panic("uu_avl_find: internal error: avl_find succeeded\n");

	return (info.ac_found);
}

void
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
{
	if (ap->ua_debug) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		if (na[1] != 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node already "
			    "in tree, or corrupt\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] == 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node not "
			    "initialized\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] != POOL_TO_MARKER(pp))
			uu_panic("uu_avl_insert(%p, %p, %p): node from "
			    "other pool, or corrupt\n",
			    (void *)ap, elem, (void *)idx);

		if (!INDEX_VALID(ap, idx))
			uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
			    (void *)ap, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");

		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}
	avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
}
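
/*
 * Find-or-insert sketch: on a miss uu_avl_find() returns NULL and fills in
 * the insertion point, which stays valid only until the next insert or
 * removal ("new_ent()" is a hypothetical constructor that allocates an
 * element and calls uu_avl_node_init()):
 *
 *	uu_avl_index_t where;
 *	struct ent key = { .e_key = 7 };
 *
 *	if (uu_avl_find(avl, &key, NULL, &where) == NULL)
 *		uu_avl_insert(avl, new_ent(7), where);
 */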

void *
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
}

void *
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
}

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_avl_lockup(void)
{
	uu_avl_pool_t *pp;

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_lock(&pp->uap_lock);
}

void
uu_avl_release(void)
{
	uu_avl_pool_t *pp;

	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_unlock(&pp->uap_lock);
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
}