Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/mm/bpf_memcontrol.c
121770 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Memory Controller-related BPF kfuncs and auxiliary code
4
*
5
* Author: Roman Gushchin <[email protected]>
6
*/
7
8
#include <linux/memcontrol.h>
9
#include <linux/bpf.h>
10
11
__bpf_kfunc_start_defs();

/**
 * bpf_get_root_mem_cgroup - Returns a pointer to the root memory cgroup
 *
 * The function has KF_ACQUIRE semantics, even though the root memory
 * cgroup is never destroyed after being created and doesn't require
 * reference counting. And it's perfectly safe to pass it to
 * bpf_put_mem_cgroup().
 *
 * Return: A pointer to the root memory cgroup, or NULL if the memory
 * controller is disabled.
 */
__bpf_kfunc struct mem_cgroup *bpf_get_root_mem_cgroup(void)
{
	if (mem_cgroup_disabled())
		return NULL;

	/* css_get() is not needed: the root memcg is never destroyed */
	return root_mem_cgroup;
}
31
32
/**
 * bpf_get_mem_cgroup - Get a reference to a memory cgroup
 * @css: pointer to the css structure
 *
 * It's fine to pass a css which belongs to any cgroup controller,
 * e.g. unified hierarchy's main css.
 *
 * Implements KF_ACQUIRE semantics.
 *
 * Return: A pointer to a mem_cgroup structure after bumping
 * the corresponding css's reference counter, or NULL if the memory
 * controller is disabled or the reference could not be acquired.
 */
__bpf_kfunc struct mem_cgroup *
bpf_get_mem_cgroup(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = NULL;
	bool rcu_unlock = false;

	if (mem_cgroup_disabled() || !root_mem_cgroup)
		return NULL;

	/*
	 * If @css doesn't belong to the memory controller, look up the
	 * memory controller's css on the same cgroup instead. The lookup
	 * through cgroup->subsys[] must happen under RCU, which also keeps
	 * the found css alive until css_tryget() below takes a reference.
	 */
	if (root_mem_cgroup->css.ss != css->ss) {
		struct cgroup *cgroup = css->cgroup;
		int ssid = root_mem_cgroup->css.ss->id;

		rcu_read_lock();
		rcu_unlock = true;
		css = rcu_dereference_raw(cgroup->subsys[ssid]);
	}

	/* css_tryget() can fail if the css is already being destroyed */
	if (css && css_tryget(css))
		memcg = container_of(css, struct mem_cgroup, css);

	if (rcu_unlock)
		rcu_read_unlock();

	return memcg;
}
70
71
/**
 * bpf_put_mem_cgroup - Put a reference to a memory cgroup
 * @memcg: memory cgroup to release
 *
 * Releases a previously acquired memcg reference, e.g. one taken by
 * bpf_get_mem_cgroup(). Implements KF_RELEASE semantics.
 */
__bpf_kfunc void bpf_put_mem_cgroup(struct mem_cgroup *memcg)
{
	css_put(&memcg->css);
}
82
83
/**
84
* bpf_mem_cgroup_vm_events - Read memory cgroup's vm event counter
85
* @memcg: memory cgroup
86
* @event: event id
87
*
88
* Allows to read memory cgroup event counters.
89
*
90
* Return: The current value of the corresponding events counter.
91
*/
92
__bpf_kfunc unsigned long bpf_mem_cgroup_vm_events(struct mem_cgroup *memcg,
93
enum vm_event_item event)
94
{
95
if (unlikely(!memcg_vm_event_item_valid(event)))
96
return (unsigned long)-1;
97
98
return memcg_events(memcg, event);
99
}
100
101
/**
102
* bpf_mem_cgroup_usage - Read memory cgroup's usage
103
* @memcg: memory cgroup
104
*
105
* Please, note that the root memory cgroup it special and is exempt
106
* from the memory accounting. The returned value is a sum of sub-cgroup's
107
* usages and it not reflecting the size of the root memory cgroup itself.
108
* If you need to get an approximation, you can use root level statistics:
109
* e.g. NR_FILE_PAGES + NR_ANON_MAPPED.
110
*
111
* Return: The current memory cgroup size in bytes.
112
*/
113
__bpf_kfunc unsigned long bpf_mem_cgroup_usage(struct mem_cgroup *memcg)
114
{
115
return page_counter_read(&memcg->memory) * PAGE_SIZE;
116
}
117
118
/**
119
* bpf_mem_cgroup_memory_events - Read memory cgroup's memory event value
120
* @memcg: memory cgroup
121
* @event: memory event id
122
*
123
* Return: The current value of the memory event counter.
124
*/
125
__bpf_kfunc unsigned long bpf_mem_cgroup_memory_events(struct mem_cgroup *memcg,
126
enum memcg_memory_event event)
127
{
128
if (unlikely(event >= MEMCG_NR_MEMORY_EVENTS))
129
return (unsigned long)-1;
130
131
return atomic_long_read(&memcg->memory_events[event]);
132
}
133
134
/**
135
* bpf_mem_cgroup_page_state - Read memory cgroup's page state counter
136
* @memcg: memory cgroup
137
* @idx: counter idx
138
*
139
* Allows to read memory cgroup statistics. The output is in bytes.
140
*
141
* Return: The value of the page state counter in bytes.
142
*/
143
__bpf_kfunc unsigned long bpf_mem_cgroup_page_state(struct mem_cgroup *memcg, int idx)
144
{
145
if (unlikely(!memcg_stat_item_valid(idx)))
146
return (unsigned long)-1;
147
148
return memcg_page_state_output(memcg, idx);
149
}
150
151
/**
 * bpf_mem_cgroup_flush_stats - Flush memory cgroup's statistics
 * @memcg: memory cgroup
 *
 * Propagate memory cgroup's statistics up the cgroup tree.
 * Might sleep, hence registered with KF_SLEEPABLE below.
 */
__bpf_kfunc void bpf_mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	mem_cgroup_flush_stats(memcg);
}

__bpf_kfunc_end_defs();
163
164
/* BTF ID table describing the kfuncs above and their verifier flags. */
BTF_KFUNCS_START(bpf_memcontrol_kfuncs)
/* Acquire/release pair managing memcg references from BPF programs. */
BTF_ID_FLAGS(func, bpf_get_root_mem_cgroup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_get_mem_cgroup, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
BTF_ID_FLAGS(func, bpf_put_mem_cgroup, KF_RELEASE)

/* Read-only statistics accessors. */
BTF_ID_FLAGS(func, bpf_mem_cgroup_vm_events)
BTF_ID_FLAGS(func, bpf_mem_cgroup_memory_events)
BTF_ID_FLAGS(func, bpf_mem_cgroup_usage)
BTF_ID_FLAGS(func, bpf_mem_cgroup_page_state)
BTF_ID_FLAGS(func, bpf_mem_cgroup_flush_stats, KF_SLEEPABLE)

BTF_KFUNCS_END(bpf_memcontrol_kfuncs)
176
177
/* Kfunc id set registered with the BPF subsystem at init time. */
static const struct btf_kfunc_id_set bpf_memcontrol_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_memcontrol_kfuncs,
};
181
182
/*
 * Register the memcontrol kfuncs. BPF_PROG_TYPE_UNSPEC makes them
 * available to all BPF program types.
 *
 * Returns 0 on success or a negative errno from the registration.
 */
static int __init bpf_memcontrol_init(void)
{
	int err;

	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
					&bpf_memcontrol_kfunc_set);
	if (err)
		/* printk messages must be newline-terminated */
		pr_warn("error while registering bpf memcontrol kfuncs: %d\n", err);

	return err;
}
late_initcall(bpf_memcontrol_init);
194
195