GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <[email protected]>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 */

#define SPL_KMEM_CACHE_IMPLEMENTING

#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <sys/string.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory. They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock. This
 * can improve performance on highly contended caches. However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released, this may not be ideal for low-memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size. When this value is set to 0 the magazine size will be
 * automatically determined based on the object size. Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per cpu). Magazines
 * may never be entirely disabled in this implementation.
 */
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
    "Default magazine size (2-256), set automatically (0)");

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory. However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit =
    SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
    "Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches. This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
    "Number of spl_kmem_cache threads");

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation, I cannot use it to emulate the Solaris APIs. I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors. Recent versions of the Linux
 *    kernel have removed support for destructors. This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc. We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab. Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations. Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features. It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32-bit arches. This will seriously
 * constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;   /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;   /* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
        gfp_t lflags = kmem_flags_convert(flags);
        void *ptr;

        if (skc->skc_flags & KMC_RECLAIMABLE)
                lflags |= __GFP_RECLAIMABLE;
        ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

        /* Resulting allocated memory will be page aligned */
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

        return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

        /*
         * The Linux direct reclaim path uses this out of band value to
         * determine if forward progress is being made. Normally this is
         * incremented by kmem_freepages() which is part of the various
         * Linux slab implementations. However, since we are using none
         * of that infrastructure we are responsible for incrementing it.
         */
        if (current->reclaim_state)
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
                current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
                current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
        vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
        return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
            skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
        uint32_t align = skc->skc_obj_align;

        return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
            P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
        return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
        return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
        return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
            skc->skc_obj_align, uint32_t));
}
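
/*
 * Layout summary (this restates what spl_sks_size(), spl_obj_size(),
 * spl_sko_from_obj() and spl_slab_alloc() below already imply): a slab
 * starts with its aligned spl_kmem_slab_t header, followed by sks_objs
 * fixed-size entries. Entry i begins at
 *
 *     base + spl_sks_size(skc) + i * spl_obj_size(skc)
 *
 * and holds the aligned object data immediately followed by its
 * spl_kmem_obj_t bookkeeping header, which spl_sko_from_obj() locates by
 * adding the rounded-up object size to the object's address.
 */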

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator. It is far better to do a few large
 * allocations and then subdivide them ourselves. Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages. We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node(). This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches. Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size   <-+ |   |
 * | spl_kmem_obj_t     |   |
 * | skc_obj_size   <---+   |
 * | spl_kmem_obj_t     |   |
 * | ...                v   |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_slab_t *sks;
        void *base;
        uint32_t obj_size;

        base = kv_alloc(skc, skc->skc_slab_size, flags);
        if (base == NULL)
                return (NULL);

        sks = (spl_kmem_slab_t *)base;
        sks->sks_magic = SKS_MAGIC;
        sks->sks_objs = skc->skc_slab_objs;
        sks->sks_age = jiffies;
        sks->sks_cache = skc;
        INIT_LIST_HEAD(&sks->sks_list);
        INIT_LIST_HEAD(&sks->sks_free_list);
        sks->sks_ref = 0;
        obj_size = spl_obj_size(skc);

        for (int i = 0; i < sks->sks_objs; i++) {
                void *obj = base + spl_sks_size(skc) + (i * obj_size);

                ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
                spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
                sko->sko_addr = obj;
                sko->sko_magic = SKO_MAGIC;
                sko->sko_slab = sks;
                INIT_LIST_HEAD(&sko->sko_list);
                list_add_tail(&sko->sko_list, &sks->sks_free_list);
        }

        return (sks);
}

/*
 * Remove a slab from the complete or partial list; it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
        spl_kmem_cache_t *skc;

        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT0(sks->sks_ref);

        skc = sks->sks_cache;
        ASSERT(skc->skc_magic == SKC_MAGIC);

        /*
         * Update slab/objects counters in the cache, then remove the
         * slab from the skc->skc_partial_list. Finally add the slab
         * and all its objects into the private work lists where the
         * destructors will be called and the memory freed to the system.
         */
        skc->skc_obj_total -= sks->sks_objs;
        skc->skc_slab_total--;
        list_del(&sks->sks_list);
        list_add(&sks->sks_list, sks_list);
        list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
        spl_kmem_slab_t *sks = NULL, *m = NULL;
        spl_kmem_obj_t *sko = NULL, *n = NULL;
        LIST_HEAD(sks_list);
        LIST_HEAD(sko_list);

        /*
         * Empty slabs and objects must be moved to a private list so they
         * can be safely freed outside the spin lock. All empty slabs are
         * at the end of skc->skc_partial_list, therefore once a non-empty
         * slab is found we can stop scanning.
         */
        spin_lock(&skc->skc_lock);
        list_for_each_entry_safe_reverse(sks, m,
            &skc->skc_partial_list, sks_list) {

                if (sks->sks_ref > 0)
                        break;

                spl_slab_free(sks, &sks_list, &sko_list);
        }
        spin_unlock(&skc->skc_lock);

        /*
         * The following two loops ensure all the object destructors are run,
         * and the slabs themselves are freed. This is all done outside the
         * skc->skc_lock since this allows the destructor to sleep, and
         * allows us to perform a conditional reschedule when freeing a
         * large number of objects and slabs back to the system.
         */

        list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
                ASSERT(sko->sko_magic == SKO_MAGIC);
        }

        list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
                ASSERT(sks->sks_magic == SKS_MAGIC);
                kv_free(skc, sks, skc->skc_slab_size);
        }
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
        struct rb_node *node = root->rb_node;
        spl_kmem_emergency_t *ske;
        unsigned long address = (unsigned long)obj;

        while (node) {
                ske = container_of(node, spl_kmem_emergency_t, ske_node);

                if (address < ske->ske_obj)
                        node = node->rb_left;
                else if (address > ske->ske_obj)
                        node = node->rb_right;
                else
                        return (ske);
        }

        return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        spl_kmem_emergency_t *ske_tmp;
        unsigned long address = ske->ske_obj;

        while (*new) {
                ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

                parent = *new;
                if (address < ske_tmp->ske_obj)
                        new = &((*new)->rb_left);
                else if (address > ske_tmp->ske_obj)
                        new = &((*new)->rb_right);
                else
                        return (0);
        }

        rb_link_node(&ske->ske_node, parent, new);
        rb_insert_color(&ske->ske_node, root);

        return (1);
}
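
/*
 * The tree above is keyed on the object address itself (ske_obj), so
 * spl_emergency_free() can recover the tracking node from nothing more
 * than the pointer handed back to the caller. A return of 0 from
 * spl_emergency_insert() signals a duplicate address, which the caller
 * is expected to unwind.
 */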

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
        gfp_t lflags = kmem_flags_convert(flags);
        spl_kmem_emergency_t *ske;
        int order = get_order(skc->skc_obj_size);
        int empty;

        /* Last chance: use a partial slab if one now exists */
        spin_lock(&skc->skc_lock);
        empty = list_empty(&skc->skc_partial_list);
        spin_unlock(&skc->skc_lock);
        if (!empty)
                return (-EEXIST);

        if (skc->skc_flags & KMC_RECLAIMABLE)
                lflags |= __GFP_RECLAIMABLE;
        ske = kmalloc(sizeof (*ske), lflags);
        if (ske == NULL)
                return (-ENOMEM);

        ske->ske_obj = __get_free_pages(lflags, order);
        if (ske->ske_obj == 0) {
                kfree(ske);
                return (-ENOMEM);
        }

        spin_lock(&skc->skc_lock);
        empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
        if (likely(empty)) {
                skc->skc_obj_total++;
                skc->skc_obj_emergency++;
                if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
                        skc->skc_obj_emergency_max = skc->skc_obj_emergency;
        }
        spin_unlock(&skc->skc_lock);

        if (unlikely(!empty)) {
                free_pages(ske->ske_obj, order);
                kfree(ske);
                return (-EINVAL);
        }

        *obj = (void *)ske->ske_obj;

        return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_emergency_t *ske;
        int order = get_order(skc->skc_obj_size);

        spin_lock(&skc->skc_lock);
        ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
        if (ske) {
                rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
                skc->skc_obj_emergency--;
                skc->skc_obj_total--;
        }
        spin_unlock(&skc->skc_lock);

        if (ske == NULL)
                return (-ENOENT);

        free_pages(ske->ske_obj, order);
        kfree(ske);

        return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab. The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
        spin_lock(&skc->skc_lock);

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);

        int count = MIN(flush, skm->skm_avail);
        for (int i = 0; i < count; i++)
                spl_cache_shrink(skc, skm->skm_objs[i]);

        skm->skm_avail -= count;
        memmove(skm->skm_objs, &(skm->skm_objs[count]),
            sizeof (void *) * skm->skm_avail);

        spin_unlock(&skc->skc_lock);
}
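
/*
 * Because the loop above releases skm_objs[0..count-1] and the memmove()
 * then compacts the survivors to the front of the array, a partial flush
 * always evicts the longest-resident entries at the bottom of the
 * magazine's stack and preserves the relative order of the rest.
 */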

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
        uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

        sks_size = spl_sks_size(skc);
        obj_size = spl_obj_size(skc);
        max_size = (spl_kmem_cache_max_size * 1024 * 1024);
        tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

        if (tgt_size <= max_size) {
                tgt_objs = (tgt_size - sks_size) / obj_size;
        } else {
                tgt_objs = (max_size - sks_size) / obj_size;
                tgt_size = (tgt_objs * obj_size) + sks_size;
        }

        if (tgt_objs == 0)
                return (-ENOSPC);

        *objs = tgt_objs;
        *size = tgt_size;

        return (0);
}
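
/*
 * Worked example with purely illustrative numbers: if obj_size (the object
 * plus its aligned spl_kmem_obj_t) came to 16448 bytes, sks_size were 64
 * bytes, and spl_kmem_cache_obj_per_slab were 8, then tgt_size would be
 * 8 * 16448 + 64 = 131648 bytes. That is well below a
 * spl_kmem_cache_max_size of, say, 32 MB, so the first branch keeps
 * tgt_objs at 8; only when the target would exceed max_size is the object
 * count clamped and the slab size recomputed from it.
 */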

/*
 * Make a guess at a reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine. Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
        uint32_t obj_size = spl_obj_size(skc);
        int size;

        if (spl_kmem_cache_magazine_size > 0)
                return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

        /* Per-magazine sizes below assume a 4 KiB page size */
        if (obj_size > (PAGE_SIZE * 256))
                size = 4;  /* Minimum 4 MiB per-magazine */
        else if (obj_size > (PAGE_SIZE * 32))
                size = 16;  /* Minimum 2 MiB per-magazine */
        else if (obj_size > (PAGE_SIZE))
                size = 64;  /* Minimum 256 KiB per-magazine */
        else if (obj_size > (PAGE_SIZE / 4))
                size = 128;  /* Minimum 128 KiB per-magazine */
        else
                size = 256;

        return (size);
}
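
/*
 * With 4 KiB pages the buckets above translate to: objects over 1 MiB get
 * magazines of 4 entries, over 128 KiB get 16, over 4 KiB get 64, over
 * 1 KiB get 128, and anything smaller gets the full 256 entries per CPU.
 */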

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
        spl_kmem_magazine_t *skm;
        int size = sizeof (spl_kmem_magazine_t) +
            sizeof (void *) * skc->skc_mag_size;

        skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
        if (skm) {
                skm->skm_magic = SKM_MAGIC;
                skm->skm_avail = 0;
                skm->skm_size = skc->skc_mag_size;
                skm->skm_refill = skc->skc_mag_refill;
                skm->skm_cache = skc;
                skm->skm_cpu = cpu;
        }

        return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
        ASSERT(skm->skm_magic == SKM_MAGIC);
        ASSERT0(skm->skm_avail);
        kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
        int i = 0;

        ASSERT0((skc->skc_flags & KMC_SLAB));

        skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
            num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
        skc->skc_mag_size = spl_magazine_size(skc);
        skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

        for_each_possible_cpu(i) {
                skc->skc_mag[i] = spl_magazine_alloc(skc, i);
                if (!skc->skc_mag[i]) {
                        for (i--; i >= 0; i--)
                                spl_magazine_free(skc->skc_mag[i]);

                        kfree(skc->skc_mag);
                        return (-ENOMEM);
                }
        }

        return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
        spl_kmem_magazine_t *skm;
        int i = 0;

        ASSERT0((skc->skc_flags & KMC_SLAB));

        for_each_possible_cpu(i) {
                skm = skc->skc_mag[i];
                spl_cache_flush(skc, skm, skm->skm_avail);
                spl_magazine_free(skm);
        }

        kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 *     name             cache name
 *     size             cache object size
 *     align            cache object alignment
 *     ctor             cache object constructor
 *     dtor             cache object destructor
 *     reclaim          cache object reclaim
 *     priv             cache private data for ctor/dtor/reclaim
 *     vmp              unused, must be NULL
 *     flags
 *             KMC_KVMEM        Force kvmem backed SPL cache
 *             KMC_SLAB         Force Linux slab backed cache
 *             KMC_NODEBUG      Disable debugging (unsupported)
 *             KMC_RECLAIMABLE  Memory can be freed under pressure
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
        gfp_t lflags = kmem_flags_convert(KM_SLEEP);
        spl_kmem_cache_t *skc;
        int rc;

        /*
         * Unsupported flags
         */
        ASSERT0P(vmp);
        ASSERT0P(reclaim);

        might_sleep();

        skc = kzalloc(sizeof (*skc), lflags);
        if (skc == NULL)
                return (NULL);

        skc->skc_magic = SKC_MAGIC;
        skc->skc_name_size = strlen(name) + 1;
        skc->skc_name = kmalloc(skc->skc_name_size, lflags);
        if (skc->skc_name == NULL) {
                kfree(skc);
                return (NULL);
        }
        strlcpy(skc->skc_name, name, skc->skc_name_size);

        skc->skc_ctor = ctor;
        skc->skc_dtor = dtor;
        skc->skc_private = priv;
        skc->skc_vmp = vmp;
        skc->skc_linux_cache = NULL;
        skc->skc_flags = flags;
        skc->skc_obj_size = size;
        skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
        atomic_set(&skc->skc_ref, 0);

        INIT_LIST_HEAD(&skc->skc_list);
        INIT_LIST_HEAD(&skc->skc_complete_list);
        INIT_LIST_HEAD(&skc->skc_partial_list);
        skc->skc_emergency_tree = RB_ROOT;
        spin_lock_init(&skc->skc_lock);
        init_waitqueue_head(&skc->skc_waitq);
        skc->skc_slab_fail = 0;
        skc->skc_slab_create = 0;
        skc->skc_slab_destroy = 0;
        skc->skc_slab_total = 0;
        skc->skc_slab_alloc = 0;
        skc->skc_slab_max = 0;
        skc->skc_obj_total = 0;
        skc->skc_obj_alloc = 0;
        skc->skc_obj_max = 0;
        skc->skc_obj_deadlock = 0;
        skc->skc_obj_emergency = 0;
        skc->skc_obj_emergency_max = 0;

        rc = percpu_counter_init(&skc->skc_linux_alloc, 0, GFP_KERNEL);
        if (rc != 0) {
                kfree(skc->skc_name);
                kfree(skc);
                return (NULL);
        }

        /*
         * Verify the requested alignment restriction is sane.
         */
        if (align) {
                VERIFY(ISP2(align));
                VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
                VERIFY3U(align, <=, PAGE_SIZE);
                skc->skc_obj_align = align;
        }

        /*
         * When no specific type of slab is requested (kmem, vmem, or
         * linuxslab) then select a cache type based on the object size
         * and default tunables.
         */
        if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
                if (spl_kmem_cache_slab_limit &&
                    size <= (size_t)spl_kmem_cache_slab_limit) {
                        /*
                         * Objects smaller than spl_kmem_cache_slab_limit can
                         * use the Linux slab for better space-efficiency.
                         */
                        skc->skc_flags |= KMC_SLAB;
                } else {
                        /*
                         * All other objects are considered large and are
                         * placed on kvmem backed slabs.
                         */
                        skc->skc_flags |= KMC_KVMEM;
                }
        }

        /*
         * Given the type of slab allocate the required resources.
         */
        if (skc->skc_flags & KMC_KVMEM) {
                rc = spl_slab_size(skc,
                    &skc->skc_slab_objs, &skc->skc_slab_size);
                if (rc)
                        goto out;

                rc = spl_magazine_create(skc);
                if (rc)
                        goto out;
        } else {
                unsigned long slabflags = 0;

                if (size > spl_kmem_cache_slab_limit)
                        goto out;

                if (skc->skc_flags & KMC_RECLAIMABLE)
                        slabflags |= SLAB_RECLAIM_ACCOUNT;

                skc->skc_linux_cache = kmem_cache_create_usercopy(
                    skc->skc_name, size, align, slabflags, 0, size, NULL);
                if (skc->skc_linux_cache == NULL)
                        goto out;
        }

        down_write(&spl_kmem_cache_sem);
        list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
        up_write(&spl_kmem_cache_sem);

        return (skc);
out:
        kfree(skc->skc_name);
        percpu_counter_destroy(&skc->skc_linux_alloc);
        kfree(skc);
        return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
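
/*
 * Illustrative lifecycle sketch (hypothetical cache name and object size;
 * error handling omitted; the constructor and destructor are left NULL so
 * that only calls defined in this file are assumed):
 *
 *     spl_kmem_cache_t *skc;
 *     void *obj;
 *
 *     skc = spl_kmem_cache_create("example_cache", 512, 0,
 *         NULL, NULL, NULL, NULL, NULL, 0);
 *     obj = spl_kmem_cache_alloc(skc, KM_SLEEP);
 *     ...
 *     spl_kmem_cache_free(skc, obj);
 *     spl_kmem_cache_destroy(skc);
 */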

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
        ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
        DECLARE_WAIT_QUEUE_HEAD(wq);
        taskqid_t id;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

        down_write(&spl_kmem_cache_sem);
        list_del_init(&skc->skc_list);
        up_write(&spl_kmem_cache_sem);

        /* Cancel and wait for any pending delayed tasks */
        VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        spin_lock(&skc->skc_lock);
        id = skc->skc_taskqid;
        spin_unlock(&skc->skc_lock);

        taskq_cancel_id(spl_kmem_cache_taskq, id);

        /*
         * Wait until all current callers complete; this is mainly
         * to catch the case where a low memory situation triggers a
         * cache reaping action which races with this destroy.
         */
        wait_event(wq, atomic_read(&skc->skc_ref) == 0);

        if (skc->skc_flags & KMC_KVMEM) {
                spl_magazine_destroy(skc);
                spl_slab_reclaim(skc);
        } else {
                ASSERT(skc->skc_flags & KMC_SLAB);
                kmem_cache_destroy(skc->skc_linux_cache);
        }

        spin_lock(&skc->skc_lock);

        /*
         * Validate there are no objects in use and free all the
         * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
         */
        ASSERT0(skc->skc_slab_alloc);
        ASSERT0(skc->skc_obj_alloc);
        ASSERT0(skc->skc_slab_total);
        ASSERT0(skc->skc_obj_total);
        ASSERT0(skc->skc_obj_emergency);
        ASSERT(list_empty(&skc->skc_complete_list));

        ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
        percpu_counter_destroy(&skc->skc_linux_alloc);

        spin_unlock(&skc->skc_lock);

        kfree(skc->skc_name);
        kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache. This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
        spl_kmem_obj_t *sko;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(sks->sks_magic == SKS_MAGIC);

        sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
        ASSERT(sko->sko_magic == SKO_MAGIC);
        ASSERT(sko->sko_addr != NULL);

        /* Remove from sks_free_list */
        list_del_init(&sko->sko_list);

        sks->sks_age = jiffies;
        sks->sks_ref++;
        skc->skc_obj_alloc++;

        /* Track max obj usage statistics */
        if (skc->skc_obj_alloc > skc->skc_obj_max)
                skc->skc_obj_max = skc->skc_obj_alloc;

        /* Track max slab usage statistics */
        if (sks->sks_ref == 1) {
                skc->skc_slab_alloc++;

                if (skc->skc_slab_alloc > skc->skc_slab_max)
                        skc->skc_slab_max = skc->skc_slab_alloc;
        }

        return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it into the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_slab_t *sks;

        fstrans_cookie_t cookie = spl_fstrans_mark();
        sks = spl_slab_alloc(skc, flags);
        spl_fstrans_unmark(cookie);

        spin_lock(&skc->skc_lock);
        if (sks) {
                skc->skc_slab_total++;
                skc->skc_obj_total += sks->sks_objs;
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);

                smp_mb__before_atomic();
                clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
                smp_mb__after_atomic();
        }
        spin_unlock(&skc->skc_lock);

        return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
        spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
        spl_kmem_cache_t *skc = ska->ska_cache;

        int error = __spl_cache_grow(skc, ska->ska_flags);

        atomic_dec(&skc->skc_ref);
        smp_mb__before_atomic();
        clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
        smp_mb__after_atomic();
        if (error == 0)
                wake_up_all(&skc->skc_waitq);

        kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
        return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab. Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
        int remaining, rc = 0;

        ASSERT0(flags & ~KM_PUBLIC_MASK);
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT0((skc->skc_flags & KMC_SLAB));

        *obj = NULL;

        /*
         * Since we can't sleep, attempt an emergency allocation to satisfy
         * the request. The only alternative is to fail the allocation but
         * it's preferable to try. The use of KM_NOSLEEP is expected to be
         * rare.
         */
        if (flags & KM_NOSLEEP)
                return (spl_emergency_alloc(skc, flags, obj));

        might_sleep();

        /*
         * Before allocating a new slab wait for any reaping to complete and
         * then return so the local magazine can be rechecked for new objects.
         */
        if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
                rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
                    TASK_UNINTERRUPTIBLE);
                return (rc ? rc : -EAGAIN);
        }

        /*
         * Note: It would be nice to reduce the overhead of context switch
         * and improve NUMA locality, by trying to allocate a new slab in the
         * current process context with KM_NOSLEEP flag.
         *
         * However, this can't be applied to vmem/kvmem due to a bug that
         * spl_vmalloc() doesn't honor gfp flags in page table allocation.
         */

        /*
         * This is handled by dispatching a work request to the global work
         * queue. This allows us to asynchronously allocate a new slab while
         * retaining the ability to safely fall back to smaller synchronous
         * allocations to ensure forward progress is always maintained.
         */
        if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
                spl_kmem_alloc_t *ska;

                ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
                if (ska == NULL) {
                        clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
                        smp_mb__after_atomic();
                        wake_up_all(&skc->skc_waitq);
                        return (-ENOMEM);
                }

                atomic_inc(&skc->skc_ref);
                ska->ska_cache = skc;
                ska->ska_flags = flags;
                taskq_init_ent(&ska->ska_tqe);
                taskq_dispatch_ent(spl_kmem_cache_taskq,
                    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
        }

        /*
         * The goal here is to only detect the rare case where a virtual slab
         * allocation has deadlocked. We must be careful to minimize the use
         * of emergency objects which are more expensive to track. Therefore,
         * we set a very long timeout for the asynchronous allocation and if
         * the timeout is reached the cache is flagged as deadlocked. From
         * this point only new emergency objects will be allocated until the
         * asynchronous allocation completes and clears the deadlocked flag.
         */
        if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
                rc = spl_emergency_alloc(skc, flags, obj);
        } else {
                remaining = wait_event_timeout(skc->skc_waitq,
                    spl_cache_grow_wait(skc), HZ / 10);

                if (!remaining) {
                        spin_lock(&skc->skc_lock);
                        if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
                                set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
                                skc->skc_obj_deadlock++;
                        }
                        spin_unlock(&skc->skc_lock);
                }

                rc = -ENOMEM;
        }

        return (rc);
}
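
/*
 * spl_cache_refill() below drops skc_lock and re-enables interrupts around
 * its call into spl_cache_grow(), so after waking it must check that it is
 * still running on the CPU that owns the magazine it was filling. If the
 * task was rescheduled to another CPU the refill is simply abandoned and
 * the caller retries against its new local magazine.
 */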

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released; however, if we are unable to locate enough free objects new
 * slabs of objects will be created. On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
        spl_kmem_slab_t *sks;
        int count = 0, rc, refill;
        void *obj = NULL;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);

        refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
        spin_lock(&skc->skc_lock);

        while (refill > 0) {
                /* No slabs available, we may need to grow the cache */
                if (list_empty(&skc->skc_partial_list)) {
                        spin_unlock(&skc->skc_lock);

                        local_irq_enable();
                        rc = spl_cache_grow(skc, flags, &obj);
                        local_irq_disable();

                        /* Emergency object for immediate use by caller */
                        if (rc == 0 && obj != NULL)
                                return (obj);

                        if (rc)
                                goto out;

                        /* Rescheduled to different CPU, skm is not local */
                        if (skm != skc->skc_mag[smp_processor_id()])
                                goto out;

                        /*
                         * Potentially rescheduled to the same CPU but
                         * allocations may have occurred from this CPU while
                         * we were sleeping so recalculate max refill.
                         */
                        refill = MIN(refill, skm->skm_size - skm->skm_avail);

                        spin_lock(&skc->skc_lock);
                        continue;
                }

                /* Grab the next available slab */
                sks = list_entry((&skc->skc_partial_list)->next,
                    spl_kmem_slab_t, sks_list);
                ASSERT(sks->sks_magic == SKS_MAGIC);
                ASSERT(sks->sks_ref < sks->sks_objs);
                ASSERT(!list_empty(&sks->sks_free_list));

                /*
                 * Consume as many objects as needed to refill the requested
                 * cache. We must also be careful not to overfill it.
                 */
                while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
                    ++count) {
                        ASSERT(skm->skm_avail < skm->skm_size);
                        ASSERT(count < skm->skm_size);
                        skm->skm_objs[skm->skm_avail++] =
                            spl_cache_obj(skc, sks);
                }

                /* Move slab to skc_complete_list when full */
                if (sks->sks_ref == sks->sks_objs) {
                        list_del(&sks->sks_list);
                        list_add(&sks->sks_list, &skc->skc_complete_list);
                }
        }

        spin_unlock(&skc->skc_lock);
out:
        return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_slab_t *sks = NULL;
        spl_kmem_obj_t *sko = NULL;

        ASSERT(skc->skc_magic == SKC_MAGIC);

        sko = spl_sko_from_obj(skc, obj);
        ASSERT(sko->sko_magic == SKO_MAGIC);
        sks = sko->sko_slab;
        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_cache == skc);
        list_add(&sko->sko_list, &sks->sks_free_list);

        sks->sks_age = jiffies;
        sks->sks_ref--;
        skc->skc_obj_alloc--;

        /*
         * Move slab to skc_partial_list when no longer full. Slabs
         * are added to the head to keep the partial list in quasi-full
         * sorted order. Fuller at the head, emptier at the tail.
         */
        if (sks->sks_ref == (sks->sks_objs - 1)) {
                list_del(&sks->sks_list);
                list_add(&sks->sks_list, &skc->skc_partial_list);
        }

        /*
         * Move empty slabs to the end of the partial list so
         * they can be easily found and freed during reclamation.
         */
        if (sks->sks_ref == 0) {
                list_del(&sks->sks_list);
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);
                skc->skc_slab_alloc--;
        }
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_magazine_t *skm;
        void *obj = NULL;

        ASSERT0(flags & ~KM_PUBLIC_MASK);
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        /*
         * Allocate directly from a Linux slab. All optimizations are left
         * to the underlying cache; we only need to guarantee that KM_SLEEP
         * callers will never fail.
         */
        if (skc->skc_flags & KMC_SLAB) {
                struct kmem_cache *slc = skc->skc_linux_cache;
                do {
                        obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
                } while ((obj == NULL) && !(flags & KM_NOSLEEP));

                if (obj != NULL) {
                        /*
                         * Even though we leave everything up to the
                         * underlying cache we still keep track of
                         * how many objects we've allocated in it for
                         * better debuggability.
                         */
                        percpu_counter_inc(&skc->skc_linux_alloc);
                }
                goto ret;
        }

        local_irq_disable();

restart:
        /*
         * Safe to update per-cpu structure without lock, but
         * in the restart case we must be careful to reacquire
         * the local magazine since this may have changed
         * when we need to grow the cache.
         */
        skm = skc->skc_mag[smp_processor_id()];
        ASSERT(skm->skm_magic == SKM_MAGIC);

        if (likely(skm->skm_avail)) {
                /* Object available in CPU cache, use it */
                obj = skm->skm_objs[--skm->skm_avail];
        } else {
                obj = spl_cache_refill(skc, skm, flags);
                if ((obj == NULL) && !(flags & KM_NOSLEEP))
                        goto restart;

                local_irq_enable();
                goto ret;
        }

        local_irq_enable();
        ASSERT(obj);
        ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
        /* Pre-emptively migrate object to CPU L1 cache */
        if (obj) {
                if (obj && skc->skc_ctor)
                        skc->skc_ctor(obj, skc->skc_private, flags);
                else
                        prefetchw(obj);
        }

        return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
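
/*
 * A consequence of the paths above: with KM_SLEEP the allocation either
 * loops on the Linux slab or restarts against the refilled magazine until
 * an object is produced, so it does not return NULL. With KM_NOSLEEP the
 * caller must be prepared for a NULL return when neither the magazine nor
 * an emergency allocation can satisfy the request.
 */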

/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from. We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_magazine_t *skm;
        unsigned long flags;
        int do_reclaim = 0;
        int do_emergency = 0;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        /*
         * Run the destructor
         */
        if (skc->skc_dtor)
                skc->skc_dtor(obj, skc->skc_private);

        /*
         * Free the object back to the underlying Linux slab.
         */
        if (skc->skc_flags & KMC_SLAB) {
                kmem_cache_free(skc->skc_linux_cache, obj);
                percpu_counter_dec(&skc->skc_linux_alloc);
                return;
        }

        /*
         * While a cache has outstanding emergency objects all freed objects
         * must be checked. However, since emergency objects will never use
         * a virtual address these objects can be safely excluded as an
         * optimization.
         */
        if (!is_vmalloc_addr(obj)) {
                spin_lock(&skc->skc_lock);
                do_emergency = (skc->skc_obj_emergency > 0);
                spin_unlock(&skc->skc_lock);

                if (do_emergency && (spl_emergency_free(skc, obj) == 0))
                        return;
        }

        local_irq_save(flags);

        /*
         * Safe to update the per-cpu structure without a lock, but
         * since no remote memory allocation tracking is performed
         * it is entirely possible to allocate an object from one
         * CPU cache and return it to another.
         */
        skm = skc->skc_mag[smp_processor_id()];
        ASSERT(skm->skm_magic == SKM_MAGIC);

        /*
         * Per-CPU cache full, flush it to make space for this object;
         * this may result in an empty slab which can be reclaimed once
         * interrupts are re-enabled.
         */
        if (unlikely(skm->skm_avail >= skm->skm_size)) {
                spl_cache_flush(skc, skm, skm->skm_refill);
                do_reclaim = 1;
        }

        /* Available space in cache, use it */
        skm->skm_objs[skm->skm_avail++] = obj;

        local_irq_restore(flags);

        if (do_reclaim)
                spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out. Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released. This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        if (skc->skc_flags & KMC_SLAB)
                return;

        atomic_inc(&skc->skc_ref);

        /*
         * Prevent concurrent cache reaping when contended.
         */
        if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
                goto out;

        /* Reclaim from the magazine and free all now empty slabs. */
        unsigned long irq_flags;
        local_irq_save(irq_flags);
        spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
        spl_cache_flush(skc, skm, skm->skm_avail);
        local_irq_restore(irq_flags);

        spl_slab_reclaim(skc);
        clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
        smp_mb__after_atomic();
        wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
        atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
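
/*
 * Note that the reap above only flushes the magazine of the CPU it happens
 * to run on before freeing empty slabs; other CPUs' magazines keep their
 * cached objects until their own frees flush them or the cache is
 * destroyed.
 */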

/*
 * This is stubbed out for code consistency with other platforms. There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
        return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
        spl_kmem_cache_t *skc = NULL;

        down_read(&spl_kmem_cache_sem);
        list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
                spl_kmem_cache_reap_now(skc);
        }
        up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
        init_rwsem(&spl_kmem_cache_sem);
        INIT_LIST_HEAD(&spl_kmem_cache_list);
        spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
            spl_kmem_cache_kmem_threads, maxclsyspri,
            spl_kmem_cache_kmem_threads * 8, INT_MAX,
            TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

        if (spl_kmem_cache_taskq == NULL)
                return (-ENOMEM);

        return (0);
}

void
spl_kmem_cache_fini(void)
{
        taskq_destroy(spl_kmem_cache_taskq);
}