GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/kern/kern_hhook.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010,2013 Lawrence Stewart <[email protected]>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology,
 * made possible in part by grants from the FreeBSD Foundation and Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/khelp.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/module_khelp.h>
#include <sys/osd.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <net/vnet.h>

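/*
 * Each helper hook function registered on a hook point is described by a
 * struct hhook, linked onto that hook point's list of hooks.
 */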
struct hhook {
	hhook_func_t		hhk_func;
	struct helper		*hhk_helper;
	void			*hhk_udata;
	STAILQ_ENTRY(hhook)	hhk_next;
};

static MALLOC_DEFINE(M_HHOOK, "hhook", "Helper hooks are linked off hhook_head lists");

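/*
 * All hook points are linked on hhook_head_list; virtualised hook points are
 * additionally linked on the per-vnet V_hhook_vhead_list.
 */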
LIST_HEAD(hhookheadhead, hhook_head);
struct hhookheadhead hhook_head_list;
VNET_DEFINE(struct hhookheadhead, hhook_vhead_list);
#define	V_hhook_vhead_list VNET(hhook_vhead_list)

static struct mtx hhook_head_list_lock;
MTX_SYSINIT(hhookheadlistlock, &hhook_head_list_lock, "hhook_head list lock",
    MTX_DEF);

/* Protected by hhook_head_list_lock. */
static uint32_t n_hhookheads;

/* Private function prototypes. */
static void hhook_head_destroy(struct hhook_head *hhh);
void khelp_new_hhook_registered(struct hhook_head *hhh, uint32_t flags);

#define	HHHLIST_LOCK()		mtx_lock(&hhook_head_list_lock)
#define	HHHLIST_UNLOCK()	mtx_unlock(&hhook_head_list_lock)
#define	HHHLIST_LOCK_ASSERT()	mtx_assert(&hhook_head_list_lock, MA_OWNED)

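/*
 * Per-hook point lock macros, wrapping the read-mostly (rm) lock embedded in
 * each hhook_head.
 */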
#define	HHH_LOCK_INIT(hhh)	rm_init(&(hhh)->hhh_lock, "hhook_head rm lock")
#define	HHH_LOCK_DESTROY(hhh)	rm_destroy(&(hhh)->hhh_lock)
#define	HHH_WLOCK(hhh)		rm_wlock(&(hhh)->hhh_lock)
#define	HHH_WUNLOCK(hhh)	rm_wunlock(&(hhh)->hhh_lock)
#define	HHH_RLOCK(hhh, rmpt)	rm_rlock(&(hhh)->hhh_lock, (rmpt))
#define	HHH_RUNLOCK(hhh, rmpt)	rm_runlock(&(hhh)->hhh_lock, (rmpt))

/*
 * Run all helper hook functions for a given hook point.
 */
void
hhook_run_hooks(struct hhook_head *hhh, void *ctx_data, struct osd *hosd)
{
	struct hhook *hhk;
	void *hdata;
	struct rm_priotracker rmpt;

	KASSERT(hhh->hhh_refcount > 0, ("hhook_head %p refcount is 0", hhh));

	HHH_RLOCK(hhh, &rmpt);
	STAILQ_FOREACH(hhk, &hhh->hhh_hooks, hhk_next) {
		if (hhk->hhk_helper != NULL &&
		    hhk->hhk_helper->h_flags & HELPER_NEEDS_OSD) {
			hdata = osd_get(OSD_KHELP, hosd, hhk->hhk_helper->h_id);
			if (hdata == NULL)
				continue;
		} else
			hdata = NULL;

		/*
		 * XXXLAS: We currently ignore the int returned by the hook,
		 * but will likely want to handle it in future to allow hhook to
		 * be used like pfil and effect changes at the hhook calling
		 * site e.g. we could define a new hook type of HHOOK_TYPE_PFIL
		 * and standardise what particular return values mean and set
		 * the context data to pass exactly the same information as pfil
		 * hooks currently receive, thus replicating pfil with hhook.
		 */
		hhk->hhk_func(hhh->hhh_type, hhh->hhh_id, hhk->hhk_udata,
		    ctx_data, hdata, hosd);
	}
	HHH_RUNLOCK(hhh, &rmpt);
}

/*
 * Register a new helper hook function with a helper hook point.
 */
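/*
 * A caller typically fills in a struct hookinfo and passes it here or to
 * hhook_add_hook_lookup(). Hypothetical sketch (MY_HOOK_ID and my_hook_func
 * are placeholder names supplied by the registering subsystem):
 *
 *	struct hookinfo hki = {
 *		.hook_type = HHOOK_TYPE_TCP,
 *		.hook_id = MY_HOOK_ID,
 *		.hook_func = my_hook_func,
 *		.hook_udata = NULL,
 *	};
 *	error = hhook_add_hook_lookup(&hki, HHOOK_WAITOK);
 */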
int
hhook_add_hook(struct hhook_head *hhh, const struct hookinfo *hki, uint32_t flags)
{
	struct hhook *hhk, *tmp;
	int error;

	error = 0;

	if (hhh == NULL)
		return (ENOENT);

	hhk = malloc(sizeof(struct hhook), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (hhk == NULL)
		return (ENOMEM);

	hhk->hhk_helper = hki->hook_helper;
	hhk->hhk_func = hki->hook_func;
	hhk->hhk_udata = hki->hook_udata;

	HHH_WLOCK(hhh);
	STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
		if (tmp->hhk_func == hki->hook_func &&
		    tmp->hhk_udata == hki->hook_udata) {
			/* The helper hook function is already registered. */
			error = EEXIST;
			break;
		}
	}

	if (!error) {
		STAILQ_INSERT_TAIL(&hhh->hhh_hooks, hhk, hhk_next);
		hhh->hhh_nhooks++;
	} else
		free(hhk, M_HHOOK);

	HHH_WUNLOCK(hhh);

	return (error);
}

/*
 * Register a helper hook function with a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 *
 * The logic is unfortunately far more complex than for
 * hhook_remove_hook_lookup() because hhook_add_hook() can call malloc() with
 * M_WAITOK and thus we cannot call hhook_add_hook() with the
 * hhook_head_list_lock held.
 *
 * The logic assembles an array of hhook_head structs that correspond to the
 * helper hook point being hooked and bumps the refcount on each (all done with
 * the hhook_head_list_lock held). The hhook_head_list_lock is then dropped, and
 * hhook_add_hook() is called and the refcount dropped for each hhook_head
 * struct in the array.
 */
int
hhook_add_hook_lookup(const struct hookinfo *hki, uint32_t flags)
{
	struct hhook_head **heads_to_hook, *hhh;
	int error, i, n_heads_to_hook;

tryagain:
	error = i = 0;
	/*
	 * Accessing n_hhookheads without hhook_head_list_lock held opens up a
	 * race with hhook_head_register() which we are unlikely to lose, but
	 * nonetheless have to cope with - hence the complex goto logic.
	 */
	n_heads_to_hook = n_hhookheads;
	heads_to_hook = malloc(n_heads_to_hook * sizeof(struct hhook_head *),
	    M_HHOOK, flags & HHOOK_WAITOK ? M_WAITOK : M_NOWAIT);
	if (heads_to_hook == NULL)
		return (ENOMEM);

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id) {
			if (i < n_heads_to_hook) {
				heads_to_hook[i] = hhh;
				refcount_acquire(&heads_to_hook[i]->hhh_refcount);
				i++;
			} else {
				/*
				 * We raced with hhook_head_register() which
				 * inserted a hhook_head that we need to hook
				 * but did not malloc space for. Abort this run
				 * and try again.
				 */
				for (i--; i >= 0; i--)
					refcount_release(&heads_to_hook[i]->hhh_refcount);
				free(heads_to_hook, M_HHOOK);
				HHHLIST_UNLOCK();
				goto tryagain;
			}
		}
	}
	HHHLIST_UNLOCK();

	for (i--; i >= 0; i--) {
		if (!error)
			error = hhook_add_hook(heads_to_hook[i], hki, flags);
		refcount_release(&heads_to_hook[i]->hhh_refcount);
	}

	free(heads_to_hook, M_HHOOK);

	return (error);
}

/*
 * Remove a helper hook function from a helper hook point.
 */
int
hhook_remove_hook(struct hhook_head *hhh, const struct hookinfo *hki)
{
	struct hhook *tmp;

	if (hhh == NULL)
		return (ENOENT);

	HHH_WLOCK(hhh);
	STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
		if (tmp->hhk_func == hki->hook_func &&
		    tmp->hhk_udata == hki->hook_udata) {
			STAILQ_REMOVE(&hhh->hhh_hooks, tmp, hhook, hhk_next);
			free(tmp, M_HHOOK);
			hhh->hhh_nhooks--;
			break;
		}
	}
	HHH_WUNLOCK(hhh);

	return (0);
}

/*
 * Remove a helper hook function from a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 */
int
hhook_remove_hook_lookup(const struct hookinfo *hki)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id)
			hhook_remove_hook(hhh, hki);
	}
	HHHLIST_UNLOCK();

	return (0);
}

/*
 * Register a new helper hook point.
 */
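/*
 * Hypothetical usage sketch for a subsystem exporting a hook point
 * (MY_HOOK_ID is a placeholder identifier defined by that subsystem):
 *
 *	struct hhook_head *my_hhh;
 *	error = hhook_head_register(HHOOK_TYPE_TCP, MY_HOOK_ID, &my_hhh,
 *	    HHOOK_WAITOK);
 */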
int
hhook_head_register(int32_t hhook_type, int32_t hhook_id, struct hhook_head **hhh,
    uint32_t flags)
{
	struct hhook_head *tmphhh;

	tmphhh = hhook_head_get(hhook_type, hhook_id);

	if (tmphhh != NULL) {
		/* Hook point previously registered. */
		hhook_head_release(tmphhh);
		return (EEXIST);
	}

	tmphhh = malloc(sizeof(struct hhook_head), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (tmphhh == NULL)
		return (ENOMEM);

	tmphhh->hhh_type = hhook_type;
	tmphhh->hhh_id = hhook_id;
	tmphhh->hhh_nhooks = 0;
	STAILQ_INIT(&tmphhh->hhh_hooks);
	HHH_LOCK_INIT(tmphhh);
	refcount_init(&tmphhh->hhh_refcount, 1);

	HHHLIST_LOCK();
	if (flags & HHOOK_HEADISINVNET) {
		tmphhh->hhh_flags |= HHH_ISINVNET;
#ifdef VIMAGE
		KASSERT(curvnet != NULL, ("curvnet is NULL"));
		tmphhh->hhh_vid = (uintptr_t)curvnet;
		LIST_INSERT_HEAD(&V_hhook_vhead_list, tmphhh, hhh_vnext);
#endif
	}
	LIST_INSERT_HEAD(&hhook_head_list, tmphhh, hhh_next);
	n_hhookheads++;
	HHHLIST_UNLOCK();

	khelp_new_hhook_registered(tmphhh, flags);

	if (hhh != NULL)
		*hhh = tmphhh;
	else
		refcount_release(&tmphhh->hhh_refcount);

	return (0);
}

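/*
 * Free a hook point: unlink it from the global (and, if virtualised, the
 * per-vnet) list, free any helper hook functions still registered on it and
 * free the head itself. Called with the hhook_head list lock held.
 */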
static void
hhook_head_destroy(struct hhook_head *hhh)
{
	struct hhook *tmp, *tmp2;

	HHHLIST_LOCK_ASSERT();
	KASSERT(n_hhookheads > 0, ("n_hhookheads should be > 0"));

	LIST_REMOVE(hhh, hhh_next);
#ifdef VIMAGE
	if (hhook_head_is_virtualised(hhh) == HHOOK_HEADISINVNET)
		LIST_REMOVE(hhh, hhh_vnext);
#endif
	HHH_WLOCK(hhh);
	STAILQ_FOREACH_SAFE(tmp, &hhh->hhh_hooks, hhk_next, tmp2)
		free(tmp, M_HHOOK);
	HHH_WUNLOCK(hhh);
	HHH_LOCK_DESTROY(hhh);
	free(hhh, M_HHOOK);
	n_hhookheads--;
}

/*
 * Remove a helper hook point.
 */
int
hhook_head_deregister(struct hhook_head *hhh)
{
	int error;

	error = 0;

	HHHLIST_LOCK();
	if (hhh == NULL)
		error = ENOENT;
	else if (hhh->hhh_refcount > 1)
		error = EBUSY;
	else
		hhook_head_destroy(hhh);
	HHHLIST_UNLOCK();

	return (error);
}

/*
 * Remove a helper hook point via a hhook_head lookup.
 */
int
hhook_head_deregister_lookup(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;
	int error;

	hhh = hhook_head_get(hhook_type, hhook_id);
	error = hhook_head_deregister(hhh);

	if (error == EBUSY)
		hhook_head_release(hhh);

	return (error);
}

/*
 * Lookup and return the hhook_head struct associated with the specified type
 * and id, or NULL if not found. If found, the hhook_head's refcount is bumped.
 */
struct hhook_head *
hhook_head_get(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hhook_type && hhh->hhh_id == hhook_id) {
#ifdef VIMAGE
			if (hhook_head_is_virtualised(hhh) ==
			    HHOOK_HEADISINVNET) {
				KASSERT(curvnet != NULL, ("curvnet is NULL"));
				if (hhh->hhh_vid != (uintptr_t)curvnet)
					continue;
			}
#endif
			refcount_acquire(&hhh->hhh_refcount);
			break;
		}
	}
	HHHLIST_UNLOCK();

	return (hhh);
}

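/*
 * Release a reference on a hhook_head obtained via hhook_head_get().
 */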
void
hhook_head_release(struct hhook_head *hhh)
{

	refcount_release(&hhh->hhh_refcount);
}

/*
 * Check the hhook_head private flags and return the appropriate public
 * representation of the flag to the caller. The function is implemented in a
 * way that allows us to cope with other subsystems becoming virtualised in the
 * future.
 */
uint32_t
hhook_head_is_virtualised(struct hhook_head *hhh)
{
	uint32_t ret;

	ret = 0;

	if (hhh != NULL) {
		if (hhh->hhh_flags & HHH_ISINVNET)
			ret = HHOOK_HEADISINVNET;
	}

	return (ret);
}

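/*
 * As above, but look the hhook_head up by type and id first.
 */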
uint32_t
hhook_head_is_virtualised_lookup(int32_t hook_type, int32_t hook_id)
{
	struct hhook_head *hhh;
	uint32_t ret;

	hhh = hhook_head_get(hook_type, hook_id);

	if (hhh == NULL)
		return (0);

	ret = hhook_head_is_virtualised(hhh);
	hhook_head_release(hhh);

	return (ret);
}

/*
 * Vnet created and being initialised.
 */
static void
hhook_vnet_init(const void *unused __unused)
{

	LIST_INIT(&V_hhook_vhead_list);
}

/*
 * Vnet being torn down and destroyed.
 */
static void
hhook_vnet_uninit(const void *unused __unused)
{
	struct hhook_head *hhh, *tmphhh;

	/*
	 * If subsystems which export helper hook points use the hhook KPI
	 * correctly, the loop below should have no work to do because the
	 * subsystem should have already called hhook_head_deregister().
	 */
	HHHLIST_LOCK();
	LIST_FOREACH_SAFE(hhh, &V_hhook_vhead_list, hhh_vnext, tmphhh) {
		printf("%s: hhook_head type=%d, id=%d cleanup required\n",
		    __func__, hhh->hhh_type, hhh->hhh_id);
		hhook_head_destroy(hhh);
	}
	HHHLIST_UNLOCK();
}

/*
 * When a vnet is created and being initialised, init the V_hhook_vhead_list.
 */
VNET_SYSINIT(hhook_vnet_init, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_init, NULL);

/*
 * The hhook KPI provides a mechanism for subsystems which export helper hook
 * points to clean up on vnet tear down, but in case the KPI is misused,
 * provide a function to clean up and free memory for a vnet being destroyed.
 */
VNET_SYSUNINIT(hhook_vnet_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_uninit, NULL);