GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/kernel/cacheinfo.c
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED 0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA 2

static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

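/*
 * Illustrative only (not from this file): a device-tree fragment with
 * the properties the table above queries might look like the
 * following; the node name and values are hypothetical.
 *
 *	l2-cache {
 *		device_type = "cache";
 *		cache-unified;			// => CACHE_TYPE_UNIFIED
 *		d-cache-size = <0x80000>;	// 512KB
 *		d-cache-line-size = <128>;
 *		d-cache-sets = <512>;
 *	};
 *
 * A node without "cache-unified" is treated as a split cache, and the
 * i-cache-* and d-cache-* properties describe its two halves.
 */
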
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

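/*
 * Hypothetical example: on a two-thread SMT core where both threads
 * share all levels of cache, the objects and their local list would
 * look like
 *
 *	L1d{cpus 0-1} -> L1i{cpus 0-1} -> L2{cpus 0-1}
 *
 * with each object's shared_cpu_map bit set by cache_cpu_set() as a
 * CPU comes online and cleared by cache_cpu_clear() as it goes
 * offline.
 */
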
static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = *line_size;
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

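/*
 * Worked example with made-up but plausible numbers: a 32KB cache
 * (size = 32768) with 128-byte lines and 32 sets yields
 * (32768 / 32) / 128 = 8, i.e. 8-way set-associative. Note that a
 * fully associative cache (nr_sets == 1) is reported as 0 above.
 */
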
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

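/*
 * Sketch of link_cache_lists() in action: given an existing local list
 * L1d -> L1i and a newly instantiated L2, link_cache_lists(l1d, l2)
 * walks to the tail (L1i) and appends, producing L1d -> L1i -> L2.
 * Linking a pair that is already linked is a no-op.
 */
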
static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

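/*
 * The loop above walks the device tree's cache chain via
 * of_find_next_cache_node(), which follows the CPU or cache node's
 * next-level phandle (conventionally "l2-cache" or "next-level-cache"
 * on PowerPC); the property name is a device-tree convention, not
 * something this file inspects directly.
 */
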
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct sys_device *sysdev;
	struct kobject *kobj = NULL;

	sysdev = get_cpu_sysdev(cpu_id);
	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
	if (!sysdev)
		goto err;

	kobj = kobject_create_and_add("cache", &sysdev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

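/*
 * Illustrative output (hypothetical system where CPUs 0 and 1 share
 * this cache): reading shared_cpu_map yields a hex cpumask such as
 * "3"; cpumask_scnprintf() prints larger masks as comma-separated
 * 32-bit groups, e.g. "00000000,00000003".
 */
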
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value. Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		goto err;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto err;

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);

	return;
err:
	kfree(index_dir);
}

static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

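/*
 * The resulting sysfs layout (illustrative, for a CPU with split L1
 * caches and a unified L2) mirrors the x86 intel_cacheinfo scheme:
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/		type level shared_cpu_map size ...	(L1d)
 *		index1/		...					(L1i)
 *		index2/		...					(L2)
 *
 * index0 is the head of the CPU's local list, so the directories
 * follow the L1d -> L1i -> L2 chain built at online time.
 */
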
void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */