GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_slab.c
/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2024-2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

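/*
 * LinuxKPI slab compatibility layer: implements the Linux kmem_cache and
 * kmalloc/krealloc/kfree APIs on top of FreeBSD's UMA zones and malloc(9).
 */
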
struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
};

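/*
 * For SLAB_TYPESAFE_BY_RCU caches a struct linux_kmem_rcu is placed at
 * the very end of each item; these macros convert between a pointer to
 * the item and a pointer to its embedded RCU bookkeeping structure.
 */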
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

static LLIST_HEAD(linux_kfree_async_list);

static void lkpi_kmem_cache_free_async_fn(void *, int);

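/*
 * Allocation passes the cache pointer as the UMA ctor argument so that
 * linux_kmem_ctor() below can find the cache an item belongs to.
 */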
void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

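/*
 * Note that uma_zcreate(9) takes the alignment as a mask, so a non-zero
 * Linux "align" (a power-of-two byte count) is converted by subtracting
 * one.  For RCU-typesafe caches the item size is grown to hold the
 * trailing struct linux_kmem_rcu; otherwise it is grown to at least
 * hold a struct llist_node, used when frees must be deferred.
 */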
struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}

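/*
 * Illustrative sketch (not part of this file): a LinuxKPI consumer
 * normally reaches the functions above through the kmem_cache_*()
 * wrappers declared in <linux/slab.h>.  The "foo" names below are
 * hypothetical.
 *
 *	struct foo { int x; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *	    0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */
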
static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

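/*
 * Free an item, choosing one of three strategies: defer via RCU for
 * typesafe-by-RCU caches, defer to a taskqueue when running inside a
 * critical section (where uma_zfree() may not be safe to call), or
 * free synchronously in the common case.
 */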
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}

void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}

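/*
 * kmalloc() must return physically contiguous memory.  malloc(9) only
 * guarantees that for allocations up to PAGE_SIZE, so larger requests
 * are routed to contigmalloc(9) with page alignment and no restriction
 * on the physical address range (0..-1UL).
 */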
void *
lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (size <= PAGE_SIZE)
		return (malloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
	else
		return (contigmalloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi___kmalloc(size_t size, gfp_t flags)
{
	size_t _s;

	/* sizeof(struct llist_node) is used for kfree_async(). */
	_s = MAX(size, sizeof(struct llist_node));

	if (_s <= PAGE_SIZE)
		return (malloc(_s, M_KMALLOC, linux_check_m_flags(flags)));
	else
		return (contigmalloc(_s, M_KMALLOC, linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi_krealloc(void *ptr, size_t size, gfp_t flags)
{
	void *nptr;
	size_t osize;

	/*
	 * First handle invariants based on function arguments.
	 */
	if (ptr == NULL)
		return (kmalloc(size, flags));

	osize = ksize(ptr);
	if (size <= osize)
		return (ptr);

	/*
	 * We know the new size > original size.  realloc(9) does not (and
	 * cannot) know about our requirements for physically contiguous memory,
	 * so we can only call it for sizes up to and including PAGE_SIZE, and
	 * otherwise have to replicate its functionality using kmalloc to get
	 * the contigmalloc(9) backing.
	 */
	if (size <= PAGE_SIZE)
		return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));

	nptr = kmalloc(size, flags);
	if (nptr == NULL)
		return (NULL);

	memcpy(nptr, ptr, osize);
	kfree(ptr);
	return (nptr);
}

struct lkpi_kmalloc_ctx {
	size_t size;
	gfp_t flags;
	void *addr;
};

static void
lkpi_kmalloc_cb(void *ctx)
{
	struct lkpi_kmalloc_ctx *lmc = ctx;

	lmc->addr = __kmalloc(lmc->size, lmc->flags);
}

void *
lkpi_kmalloc(size_t size, gfp_t flags)
{
	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };

	lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
	return (lmc.addr);
}

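/*
 * kvmalloc() differs from kmalloc() in that it does not promise
 * physically contiguous memory, so plain malloc(9) suffices for any
 * size.  Both run their allocation through lkpi_fpu_safe_exec() so
 * that they can be called from within a kernel_fpu_begin() section.
 */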
static void
lkpi_kvmalloc_cb(void *ctx)
{
	struct lkpi_kmalloc_ctx *lmc = ctx;

	lmc->addr = malloc(lmc->size, M_KMALLOC, linux_check_m_flags(lmc->flags));
}

void *
lkpi_kvmalloc(size_t size, gfp_t flags)
{
	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };

	lkpi_fpu_safe_exec(&lkpi_kvmalloc_cb, &lmc);
	return (lmc.addr);
}

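/*
 * Deferred kfree() support: when kfree() is called from a critical
 * section the address is pushed onto a lock-free list (reusing the
 * freed memory itself as the llist_node) and the actual free runs
 * later from the linux_irq_work_tq taskqueue.
 */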
static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

static void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}

void
lkpi_kfree(const void *ptr)
{
	if (ZERO_OR_NULL_PTR(ptr))
		return;

	if (curthread->td_critnest != 0)
		linux_kfree_async(__DECONST(void *, ptr));
	else
		free(__DECONST(void *, ptr), M_KMALLOC);
}