Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/lib/libuutil/uu_list.c
48375 views
1
// SPDX-License-Identifier: CDDL-1.0
2
/*
3
* CDDL HEADER START
4
*
5
* The contents of this file are subject to the terms of the
6
* Common Development and Distribution License (the "License").
7
* You may not use this file except in compliance with the License.
8
*
9
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10
* or https://opensource.org/licenses/CDDL-1.0.
11
* See the License for the specific language governing permissions
12
* and limitations under the License.
13
*
14
* When distributing Covered Code, include this CDDL HEADER in each
15
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16
* If applicable, add the following below this CDDL HEADER, with the
17
* fields enclosed by brackets "[]" replaced with your own identifying
18
* information: Portions Copyright [yyyy] [name of copyright owner]
19
*
20
* CDDL HEADER END
21
*/
22
/*
23
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24
* Use is subject to license terms.
25
*/
26
27
28
29
#include "libuutil_common.h"
30
31
#include <stdlib.h>
32
#include <string.h>
33
#include <unistd.h>
34
#include <sys/time.h>
35
36
/*
 * Convert between a user object pointer and the list node embedded in it,
 * using the node offset recorded in the list at creation time.
 */
#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * uu_list_index_ts define a location for insertion.  They are simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low-bits of the index, to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
/* cycle the mark through 1..INDEX_MAX, never 0 (0 means "no mark") */
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)

/*
 * Value stored in uln_next of a node that has been initialized with
 * uu_list_node_init() but is not currently linked onto any list.
 */
#define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))

/* anchor of the global circular list of all list pools, and its lock */
static uu_list_pool_t	uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
static pthread_mutex_t	uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;
62
63
/*
 * Create a list pool: a template for lists whose objects are "objsize"
 * bytes with an embedded uu_list_node_t at "nodeoffset".  "compare_func"
 * (which may be NULL) is used to order lists created with UU_LIST_SORTED.
 * Returns NULL and sets uu_error on failure.
 */
uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_list_pool_t *pp, *next, *prev;

	/* the embedded node must fit entirely inside the object */
	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_LIST_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_list_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
	pp->ulp_nodeoffset = nodeoffset;
	pp->ulp_objsize = objsize;
	pp->ulp_cmp = compare_func;
	if (flags & UU_LIST_POOL_DEBUG)
		pp->ulp_debug = 1;
	pp->ulp_last_index = 0;

	(void) pthread_mutex_init(&pp->ulp_lock, NULL);

	/* start with an empty circular list of this pool's lists */
	pp->ulp_null_list.ul_next = &pp->ulp_null_list;
	pp->ulp_null_list.ul_prev = &pp->ulp_null_list;

	/* append the new pool to the global pool list */
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next = next = &uu_null_lpool;
	pp->ulp_prev = prev = next->ulp_prev;
	next->ulp_prev = pp;
	prev->ulp_next = pp;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);

	return (pp);
}
109
110
/*
 * Destroy a list pool.  In debug pools, panics if any lists created from
 * the pool are still outstanding.  Unlinks the pool from the global pool
 * list and frees it.
 */
void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		/* an empty pool has its null list pointing at itself */
		if (pp->ulp_null_list.ul_next != &pp->ulp_null_list ||
		    pp->ulp_null_list.ul_prev != &pp->ulp_null_list) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	/* poison the links before freeing */
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}
130
131
void
132
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
133
{
134
uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
135
136
if (pp->ulp_debug) {
137
uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
138
if (offset + sizeof (*np) > pp->ulp_objsize) {
139
uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
140
"offset %ld doesn't fit in object (size %ld)\n",
141
base, (void *)np, (void *)pp, pp->ulp_name,
142
(long)offset, (long)pp->ulp_objsize);
143
}
144
if (offset != pp->ulp_nodeoffset) {
145
uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
146
"offset %ld doesn't match pool's offset (%ld)\n",
147
base, (void *)np, (void *)pp, pp->ulp_name,
148
(long)offset, (long)pp->ulp_objsize);
149
}
150
}
151
np->uln_next = POOL_TO_MARKER(pp);
152
np->uln_prev = NULL;
153
}
154
155
void
156
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
157
{
158
uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
159
160
if (pp->ulp_debug) {
161
if (np->uln_next == NULL &&
162
np->uln_prev == NULL) {
163
uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
164
"node already finied\n",
165
base, (void *)np_arg, (void *)pp, pp->ulp_name);
166
}
167
if (np->uln_next != POOL_TO_MARKER(pp) ||
168
np->uln_prev != NULL) {
169
uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
170
"node corrupt or on list\n",
171
base, (void *)np_arg, (void *)pp, pp->ulp_name);
172
}
173
}
174
np->uln_next = NULL;
175
np->uln_prev = NULL;
176
}
177
178
/*
 * Create a list from pool "pp".  "parent" is an arbitrary pointer stored
 * for the caller.  UU_LIST_SORTED requires the pool to have a comparison
 * function; UU_LIST_DEBUG enables per-list debug checks even if the pool
 * is not a debug pool.  Returns NULL and sets uu_error on failure.
 */
uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
	uu_list_t *lp, *next, *prev;

	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
		if (pp->ulp_debug)
			uu_panic("uu_list_create(%p, ...): requested "
			    "UU_LIST_SORTED, but pool has no comparison func\n",
			    (void *)pp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}

	lp = uu_zalloc(sizeof (*lp));
	if (lp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	lp->ul_pool = pp;
	lp->ul_parent = parent;
	lp->ul_offset = pp->ulp_nodeoffset;
	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
	lp->ul_sorted = (flags & UU_LIST_SORTED);
	lp->ul_numnodes = 0;
	/* give each list a fresh index mark, cycled through the pool */
	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

	/* empty circular list of element nodes... */
	lp->ul_null_node.uln_next = &lp->ul_null_node;
	lp->ul_null_node.uln_prev = &lp->ul_null_node;

	/* ...and of registered walkers */
	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

	/* append the new list to its pool's list of lists */
	(void) pthread_mutex_lock(&pp->ulp_lock);
	next = &pp->ulp_null_list;
	prev = next->ul_prev;
	lp->ul_next = next;
	lp->ul_prev = prev;
	next->ul_prev = lp;
	prev->ul_next = lp;
	(void) pthread_mutex_unlock(&pp->ulp_lock);

	return (lp);
}
228
229
/*
 * Destroy a list.  In debug mode, panics if the list still has elements,
 * if the element count disagrees with the (empty) node list, or if any
 * walkers are still registered.  Unlinks the list from its pool and frees
 * it.
 */
void
uu_list_destroy(uu_list_t *lp)
{
	uu_list_pool_t *pp = lp->ul_pool;

	if (lp->ul_debug) {
		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
			uu_panic("uu_list_destroy(%p): list not empty\n",
			    (void *)lp);
		}
		if (lp->ul_numnodes != 0) {
			uu_panic("uu_list_destroy(%p): numnodes is nonzero, "
			    "but list is empty\n", (void *)lp);
		}
		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
			uu_panic("uu_list_destroy(%p): outstanding walkers\n",
			    (void *)lp);
		}
	}

	(void) pthread_mutex_lock(&pp->ulp_lock);
	lp->ul_next->ul_prev = lp->ul_prev;
	lp->ul_prev->ul_next = lp->ul_next;
	(void) pthread_mutex_unlock(&pp->ulp_lock);
	/* poison the links before freeing */
	lp->ul_prev = NULL;
	lp->ul_next = NULL;
	lp->ul_pool = NULL;
	uu_free(lp);
}
260
261
/*
 * Link node "np" between "prev" and "next" on list "lp", and bump the
 * element count.  In debug mode, verifies that prev and next really are
 * adjacent, that np is initialized (carries the pool marker) and not
 * already on a list, and advances the list's index mark so outstanding
 * uu_list_index_ts become stale.
 */
static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
	if (lp->ul_debug) {
		if (next->uln_prev != prev || prev->uln_next != next)
			uu_panic("insert(%p): internal error: %p and %p not "
			    "neighbors\n", (void *)lp, (void *)next,
			    (void *)prev);

		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
		    np->uln_prev != NULL) {
			uu_panic("insert(%p): elem %p node %p corrupt, "
			    "not initialized, or already in a list.\n",
			    (void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
		}
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}
	np->uln_next = next;
	np->uln_prev = prev;
	next->uln_prev = np;
	prev->uln_next = np;

	lp->ul_numnodes++;
}
289
290
/*
 * Insert "elem" at the position named by index "idx" (typically obtained
 * from uu_list_find()).  An index of 0 (no node) means the end of the
 * list.  In debug mode, panics on stale or malformed indexes.
 */
void
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
{
	uu_list_node_impl_t *np;

	np = INDEX_TO_NODE(idx);
	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		/* the index's low-bit mark must match the list's mark */
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
			    (void *)lp, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
			    "index\n", (void *)lp, elem, (void *)idx);
	}

	/* the index names the node AFTER the insertion point */
	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
}
312
313
/*
 * Search "lp" for an element comparing equal to "elem" using the pool's
 * comparison function ("private" is passed through to it).  Returns the
 * matching element, or NULL if none.  If "out" is non-NULL, it receives
 * an insertion index: the match's position, the first greater element on
 * a sorted list, or the end of the list.  Fails with
 * UU_ERROR_NOT_SUPPORTED if the pool has no comparison function.
 */
void *
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
{
	int sorted = lp->ul_sorted;
	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
	uu_list_node_impl_t *np;

	if (func == NULL) {
		if (out != NULL)
			*out = 0;
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}
	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
	    np = np->uln_next) {
		void *ep = NODE_TO_ELEM(lp, np);
		int cmp = func(ep, elem, private);
		if (cmp == 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (ep);
		}
		/* sorted lists can stop at the first greater element */
		if (sorted && cmp > 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (NULL);
		}
	}
	/* no match: index of 0 means "insert at the end" */
	if (out != NULL)
		*out = NODE_TO_INDEX(lp, 0);
	return (NULL);
}
345
346
void *
347
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
348
{
349
uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
350
351
if (np == NULL)
352
np = &lp->ul_null_node;
353
354
if (lp->ul_debug) {
355
if (!INDEX_VALID(lp, idx))
356
uu_panic("uu_list_nearest_next(%p, %p): %s\n",
357
(void *)lp, (void *)idx,
358
INDEX_CHECK(idx)? "outdated index" :
359
"invalid index");
360
if (np->uln_prev == NULL)
361
uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
362
"index\n", (void *)lp, (void *)idx);
363
}
364
365
if (np == &lp->ul_null_node)
366
return (NULL);
367
else
368
return (NODE_TO_ELEM(lp, np));
369
}
370
371
void *
372
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
373
{
374
uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
375
376
if (np == NULL)
377
np = &lp->ul_null_node;
378
379
if (lp->ul_debug) {
380
if (!INDEX_VALID(lp, idx))
381
uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
382
(void *)lp, (void *)idx, INDEX_CHECK(idx)?
383
"outdated index" : "invalid index");
384
if (np->uln_prev == NULL)
385
uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
386
"index\n", (void *)lp, (void *)idx);
387
}
388
389
if ((np = np->uln_prev) == &lp->ul_null_node)
390
return (NULL);
391
else
392
return (NODE_TO_ELEM(lp, np));
393
}
394
395
/*
 * Initialize walker "wp" over list "lp".  UU_WALK_REVERSE walks back to
 * front; UU_WALK_ROBUST allows elements to be removed during the walk.
 * Robust walkers (and all walkers on debug lists) are registered on the
 * list so uu_list_remove() can see them.
 */
static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->ulw_list = lp;
	wp->ulw_robust = robust;
	wp->ulw_dir = direction;
	/* start at the first element in the direction of travel */
	if (direction > 0)
		wp->ulw_next_result = lp->ul_null_node.uln_next;
	else
		wp->ulw_next_result = lp->ul_null_node.uln_prev;

	if (lp->ul_debug || robust) {
		/*
		 * Add this walker to the list's list of walkers so
		 * uu_list_remove() can advance us if somebody tries to
		 * remove ulw_next_result.
		 */
		wp->ulw_next = next = &lp->ul_null_walk;
		wp->ulw_prev = prev = next->ulw_prev;
		next->ulw_prev = wp;
		prev->ulw_next = wp;
	}
}
424
425
static uu_list_node_impl_t *
426
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
427
{
428
uu_list_node_impl_t *np = wp->ulw_next_result;
429
uu_list_node_impl_t *next;
430
431
if (np == &lp->ul_null_node)
432
return (NULL);
433
434
next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;
435
436
wp->ulw_next_result = next;
437
return (np);
438
}
439
440
static void
441
list_walk_fini(uu_list_walk_t *wp)
442
{
443
/* GLXXX debugging? */
444
if (wp->ulw_next != NULL) {
445
wp->ulw_next->ulw_prev = wp->ulw_prev;
446
wp->ulw_prev->ulw_next = wp->ulw_next;
447
wp->ulw_next = NULL;
448
wp->ulw_prev = NULL;
449
}
450
wp->ulw_list = NULL;
451
wp->ulw_next_result = NULL;
452
}
453
454
uu_list_walk_t *
455
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
456
{
457
uu_list_walk_t *wp;
458
459
if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
460
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
461
return (NULL);
462
}
463
464
wp = uu_zalloc(sizeof (*wp));
465
if (wp == NULL) {
466
uu_set_error(UU_ERROR_NO_MEMORY);
467
return (NULL);
468
}
469
470
list_walk_init(wp, lp, flags);
471
return (wp);
472
}
473
474
void *
475
uu_list_walk_next(uu_list_walk_t *wp)
476
{
477
uu_list_t *lp = wp->ulw_list;
478
uu_list_node_impl_t *np = list_walk_advance(wp, lp);
479
480
if (np == NULL)
481
return (NULL);
482
483
return (NODE_TO_ELEM(lp, np));
484
}
485
486
void
487
uu_list_walk_end(uu_list_walk_t *wp)
488
{
489
list_walk_fini(wp);
490
uu_free(wp);
491
}
492
493
/*
 * Walk "lp", calling "func(elem, private)" for each element until the
 * callback returns something other than UU_WALK_NEXT or the list is
 * exhausted.  Robust walks (and walks of debug lists) go through a
 * registered walker so removals are safe; otherwise the node list is
 * traversed directly.  Returns 0 on success, or -1 (with uu_error set)
 * on a bad flag, allocation failure, or negative callback status.
 */
int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	uu_list_node_impl_t *np;

	int status = UU_WALK_NEXT;

	int robust = (flags & UU_WALK_ROBUST);
	int reverse = (flags & UU_WALK_REVERSE);

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	if (lp->ul_debug || robust) {
		/* use a registered walker, as uu_list_walk_start() would */
		uu_list_walk_t *my_walk;
		void *e;

		my_walk = uu_zalloc(sizeof (*my_walk));
		if (my_walk == NULL)
			return (-1);

		list_walk_init(my_walk, lp, flags);
		while (status == UU_WALK_NEXT &&
		    (e = uu_list_walk_next(my_walk)) != NULL)
			status = (*func)(e, private);
		list_walk_fini(my_walk);

		uu_free(my_walk);
	} else {
		/* fast path: traverse the node list directly */
		if (!reverse) {
			for (np = lp->ul_null_node.uln_next;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_next) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		} else {
			for (np = lp->ul_null_node.uln_prev;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_prev) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		}
	}
	/* any non-negative final status counts as success */
	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
543
544
/*
 * Remove "elem" from "lp".  Robust walkers whose pending result is the
 * removed node are advanced past it; in debug mode, active non-robust
 * walkers cause a panic, and the index mark is bumped to invalidate
 * outstanding uu_list_index_ts.  The node is returned to its
 * initialized-but-unlinked state.
 */
void
uu_list_remove(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
	uu_list_walk_t *wp;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
			    (void *)lp, elem);
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}

	/*
	 * robust walkers must be advanced.  In debug mode, non-robust
	 * walkers are also on the list.  If there are any, it's an error.
	 */
	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
	    wp = wp->ulw_next) {
		if (wp->ulw_robust) {
			if (np == wp->ulw_next_result)
				(void) list_walk_advance(wp, lp);
		} else if (wp->ulw_next_result != NULL) {
			uu_panic("uu_list_remove(%p, %p): active non-robust "
			    "walker\n", (void *)lp, elem);
		}
	}

	np->uln_next->uln_prev = np->uln_prev;
	np->uln_prev->uln_next = np->uln_next;

	lp->ul_numnodes--;

	/* restore the "initialized, not on a list" marker state */
	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
	np->uln_prev = NULL;
}
583
584
void *
585
uu_list_teardown(uu_list_t *lp, void **cookie)
586
{
587
void *ep;
588
589
/*
590
* XXX: disable list modification until list is empty
591
*/
592
if (lp->ul_debug && *cookie != NULL)
593
uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
594
(void *)lp, (void *)cookie);
595
596
ep = uu_list_first(lp);
597
if (ep)
598
uu_list_remove(lp, ep);
599
return (ep);
600
}
601
602
int
603
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
604
{
605
uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
606
607
if (target == NULL)
608
np = &lp->ul_null_node;
609
610
if (lp->ul_debug) {
611
if (np->uln_prev == NULL)
612
uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
613
"not currently on a list\n",
614
(void *)lp, target, elem, target);
615
}
616
if (lp->ul_sorted) {
617
if (lp->ul_debug)
618
uu_panic("uu_list_insert_before(%p, ...): list is "
619
"UU_LIST_SORTED\n", (void *)lp);
620
uu_set_error(UU_ERROR_NOT_SUPPORTED);
621
return (-1);
622
}
623
624
list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
625
return (0);
626
}
627
628
int
629
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
630
{
631
uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
632
633
if (target == NULL)
634
np = &lp->ul_null_node;
635
636
if (lp->ul_debug) {
637
if (np->uln_prev == NULL)
638
uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
639
"not currently on a list\n",
640
(void *)lp, target, elem, target);
641
}
642
if (lp->ul_sorted) {
643
if (lp->ul_debug)
644
uu_panic("uu_list_insert_after(%p, ...): list is "
645
"UU_LIST_SORTED\n", (void *)lp);
646
uu_set_error(UU_ERROR_NOT_SUPPORTED);
647
return (-1);
648
}
649
650
list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
651
return (0);
652
}
653
654
size_t
655
uu_list_numnodes(uu_list_t *lp)
656
{
657
return (lp->ul_numnodes);
658
}
659
660
void *
661
uu_list_first(uu_list_t *lp)
662
{
663
uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
664
if (n == &lp->ul_null_node)
665
return (NULL);
666
return (NODE_TO_ELEM(lp, n));
667
}
668
669
void *
670
uu_list_last(uu_list_t *lp)
671
{
672
uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
673
if (n == &lp->ul_null_node)
674
return (NULL);
675
return (NODE_TO_ELEM(lp, n));
676
}
677
678
void *
679
uu_list_next(uu_list_t *lp, void *elem)
680
{
681
uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
682
683
n = n->uln_next;
684
if (n == &lp->ul_null_node)
685
return (NULL);
686
return (NODE_TO_ELEM(lp, n));
687
}
688
689
void *
690
uu_list_prev(uu_list_t *lp, void *elem)
691
{
692
uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
693
694
n = n->uln_prev;
695
if (n == &lp->ul_null_node)
696
return (NULL);
697
return (NODE_TO_ELEM(lp, n));
698
}
699
700
/*
701
* called from uu_lockup() and uu_release(), as part of our fork1()-safety.
702
*/
703
void
704
uu_list_lockup(void)
705
{
706
uu_list_pool_t *pp;
707
708
(void) pthread_mutex_lock(&uu_lpool_list_lock);
709
for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
710
pp = pp->ulp_next)
711
(void) pthread_mutex_lock(&pp->ulp_lock);
712
}
713
714
void
715
uu_list_release(void)
716
{
717
uu_list_pool_t *pp;
718
719
for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
720
pp = pp->ulp_next)
721
(void) pthread_mutex_unlock(&pp->ulp_lock);
722
(void) pthread_mutex_unlock(&uu_lpool_list_lock);
723
}
724
725