GitHub Repository: torvalds/linux
Path: blob/master/mm/cma_debug.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <[email protected]>
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

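/*
 * Bookkeeping for one allocation made through the debugfs "alloc" file,
 * kept on a per-CMA hlist so a later write to "free" can release it.
 */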
struct cma_mem {
        struct hlist_node node;
        struct page *p;
        unsigned long n;
};

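/*
 * Generic read-only getter: "data" points at an unsigned long field
 * (count, order_per_bit, base_pfn) that is reported as a u64.
 */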
static int cma_debugfs_get(void *data, u64 *val)
{
        unsigned long *p = data;

        *val = *p;

        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

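/* Report the number of pages currently allocated from this CMA area. */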
static int cma_used_get(void *data, u64 *val)
{
        struct cma *cma = data;

        spin_lock_irq(&cma->lock);
        *val = cma->count - cma->available_count;
        spin_unlock_irq(&cma->lock);

        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

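/*
 * Report the largest contiguous free chunk in the area, in pages: scan each
 * range's allocation bitmap for clear bit runs and convert the longest run
 * from bitmap bits to pages via order_per_bit.
 */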
static int cma_maxchunk_get(void *data, u64 *val)
{
        struct cma *cma = data;
        struct cma_memrange *cmr;
        unsigned long maxchunk = 0;
        unsigned long start, end;
        unsigned long bitmap_maxno;
        int r;

        spin_lock_irq(&cma->lock);
        for (r = 0; r < cma->nranges; r++) {
                cmr = &cma->ranges[r];
                bitmap_maxno = cma_bitmap_maxno(cma, cmr);
                for_each_clear_bitrange(start, end, cmr->bitmap, bitmap_maxno)
                        maxchunk = max(end - start, maxchunk);
        }
        spin_unlock_irq(&cma->lock);
        *val = (u64)maxchunk << cma->order_per_bit;

        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

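/* Track a debugfs-made allocation on the per-CMA list, under mem_head_lock. */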
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
        spin_lock(&cma->mem_head_lock);
        hlist_add_head(&mem->node, &cma->mem_head);
        spin_unlock(&cma->mem_head_lock);
}

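/* Pop the most recently added debugfs allocation, or NULL if none remain. */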
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
        struct cma_mem *mem = NULL;

        spin_lock(&cma->mem_head_lock);
        if (!hlist_empty(&cma->mem_head)) {
                mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
                hlist_del_init(&mem->node);
        }
        spin_unlock(&cma->mem_head_lock);

        return mem;
}

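/*
 * Release up to "count" pages previously allocated through debugfs. A block
 * can only be returned partially when order_per_bit is 0; otherwise the
 * entry is put back on the list and the loop stops.
 */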
static int cma_free_mem(struct cma *cma, int count)
{
        struct cma_mem *mem = NULL;

        while (count) {
                mem = cma_get_entry_from_list(cma);
                if (mem == NULL)
                        return 0;

                if (mem->n <= count) {
                        cma_release(cma, mem->p, mem->n);
                        count -= mem->n;
                        kfree(mem);
                } else if (cma->order_per_bit == 0) {
                        cma_release(cma, mem->p, count);
                        mem->p += count;
                        mem->n -= count;
                        count = 0;
                        cma_add_to_cma_mem_list(cma, mem);
                } else {
                        pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
                        cma_add_to_cma_mem_list(cma, mem);
                        break;
                }
        }

        return 0;
}

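/* Writing N to the "free" file returns N pages taken via the "alloc" file. */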
static int cma_free_write(void *data, u64 val)
{
        int pages = val;
        struct cma *cma = data;

        return cma_free_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

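/*
 * Allocate "count" pages from the area and remember them on the per-CMA
 * list so they can be released later through the "free" file.
 */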
static int cma_alloc_mem(struct cma *cma, int count)
{
        struct cma_mem *mem;
        struct page *p;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        p = cma_alloc(cma, count, 0, false);
        if (!p) {
                kfree(mem);
                return -ENOMEM;
        }

        mem->p = p;
        mem->n = count;

        cma_add_to_cma_mem_list(cma, mem);

        return 0;
}

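/* Writing N to the "alloc" file allocates N pages from this CMA area. */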
static int cma_alloc_write(void *data, u64 val)
{
        int pages = val;
        struct cma *cma = data;

        return cma_alloc_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

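/*
 * Create the per-area debugfs directory. With debugfs mounted at its usual
 * /sys/kernel/debug location, the resulting layout is:
 *
 *   cma/<name>/{alloc,free,count,order_per_bit,used,maxchunk}
 *   cma/<name>/ranges/<r>/{base_pfn,bitmap}
 *
 * plus top-level base_pfn and bitmap symlinks pointing at range 0.
 */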
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
        struct dentry *tmp, *dir, *rangedir;
        int r;
        char rdirname[12];
        struct cma_memrange *cmr;

        tmp = debugfs_create_dir(cma->name, root_dentry);

        debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
        debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
        debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
        debugfs_create_file("order_per_bit", 0444, tmp,
                            &cma->order_per_bit, &cma_debugfs_fops);
        debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
        debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);

        rangedir = debugfs_create_dir("ranges", tmp);
        for (r = 0; r < cma->nranges; r++) {
                cmr = &cma->ranges[r];
                snprintf(rdirname, sizeof(rdirname), "%d", r);
                dir = debugfs_create_dir(rdirname, rangedir);
                debugfs_create_file("base_pfn", 0444, dir,
                                    &cmr->base_pfn, &cma_debugfs_fops);
                cmr->dfs_bitmap.array = (u32 *)cmr->bitmap;
                cmr->dfs_bitmap.n_elements =
                        DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr),
                                     BITS_PER_BYTE * sizeof(u32));
                debugfs_create_u32_array("bitmap", 0444, dir,
                                         &cmr->dfs_bitmap);
        }

        /*
         * Backward compatible symlinks to range 0 for base_pfn and bitmap.
         */
        debugfs_create_symlink("base_pfn", tmp, "ranges/0/base_pfn");
        debugfs_create_symlink("bitmap", tmp, "ranges/0/bitmap");
}

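/*
 * Create the debugfs "cma" root and one subdirectory per registered CMA
 * area. Runs as a late initcall, so all CMA areas are already registered.
 */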
static int __init cma_debugfs_init(void)
{
        struct dentry *cma_debugfs_root;
        int i;

        cma_debugfs_root = debugfs_create_dir("cma", NULL);

        for (i = 0; i < cma_area_count; i++)
                cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root);

        return 0;
}
late_initcall(cma_debugfs_init);