Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/mm/damon/stat.c
49686 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Shows data access monitoring results in simple metrics.
4
*/
5
6
#define pr_fmt(fmt) "damon-stat: " fmt
7
8
#include <linux/damon.h>
9
#include <linux/init.h>
10
#include <linux/kernel.h>
11
#include <linux/module.h>
12
#include <linux/sort.h>
13
14
/*
 * Expose the parameters below as "damon_stat.<param>" regardless of
 * whether this file is built in or as a module.
 */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_stat."
18
19
/* Setter for the "enabled" parameter; defined below, after start/stop. */
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp);

/*
 * Custom ops for "enabled": writing the parameter starts or stops
 * monitoring via damon_stat_enabled_store(); reads use the stock bool
 * getter.
 */
static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_stat_enabled_store,
	.get = param_get_bool,
};
26
27
static bool enabled __read_mostly = IS_ENABLED(
28
CONFIG_DAMON_STAT_ENABLED_DEFAULT);
29
module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
30
MODULE_PARM_DESC(enabled, "Enable of disable DAMON_STAT");
31
32
/* Read-only output: estimated memory bandwidth usage, in bytes/second. */
static unsigned long estimated_memory_bandwidth __read_mostly;
module_param(estimated_memory_bandwidth, ulong, 0400);
MODULE_PARM_DESC(estimated_memory_bandwidth,
	"Estimated memory bandwidth usage in bytes per second");

/*
 * Read-only output: idle time percentiles (indices 0-100) in milliseconds.
 * Negative entries mean the memory was actively accessed rather than idle
 * (see damon_stat_idletime()).
 */
static long memory_idle_ms_percentiles[101] __read_mostly = {0,};
module_param_array(memory_idle_ms_percentiles, long, NULL, 0400);
MODULE_PARM_DESC(memory_idle_ms_percentiles,
	"Memory idle time percentiles in milliseconds");

/* Read-only output: current (auto-tuned) aggregation interval, in us. */
static unsigned long aggr_interval_us;
module_param(aggr_interval_us, ulong, 0400);
MODULE_PARM_DESC(aggr_interval_us,
	"Current tuned aggregation interval in microseconds");
46
47
/* The DAMON context that this module builds and runs. */
static struct damon_ctx *damon_stat_context;

/* jiffies timestamp of the last stats refresh, used for throttling. */
static unsigned long damon_stat_last_refresh_jiffies;
50
51
/*
 * Update estimated_memory_bandwidth from the monitoring results of @c.
 *
 * Sums region size times nr_accesses over all regions, i.e. the estimated
 * number of bytes accessed during one aggregation interval, then scales it
 * to bytes per second.
 */
static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long access_bytes = 0;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t)
			access_bytes += (r->ar.end - r->ar.start) *
				r->nr_accesses;
	}
	/* aggr_interval is in microseconds; convert to per-second rate */
	estimated_memory_bandwidth = access_bytes * USEC_PER_MSEC *
		MSEC_PER_SEC / c->attrs.aggr_interval;
}
65
66
static int damon_stat_idletime(const struct damon_region *r)
67
{
68
if (r->nr_accesses)
69
return -1 * (r->age + 1);
70
return r->age + 1;
71
}
72
73
/*
 * sort() comparator over struct damon_region pointers: orders regions by
 * ascending damon_stat_idletime(), i.e. actively accessed regions first,
 * long-idle regions last.
 */
static int damon_stat_cmp_regions(const void *a, const void *b)
{
	const struct damon_region *left = *(const struct damon_region **)a;
	const struct damon_region *right = *(const struct damon_region **)b;

	return damon_stat_idletime(left) - damon_stat_idletime(right);
}
80
81
/*
 * Build an array of pointers to all monitoring regions of @c, sorted by
 * damon_stat_idletime() (actively accessed regions first, coldest last).
 *
 * On success returns 0 and sets @sorted_ptr to the kmalloc-ed array (the
 * caller must kfree() it), @nr_regions_ptr to the number of entries, and
 * @total_sz_ptr to the total size of the regions in bytes.  Returns
 * -ENOMEM if the array cannot be allocated.
 */
static int damon_stat_sort_regions(struct damon_ctx *c,
		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
		unsigned long *total_sz_ptr)
{
	struct damon_target *t;
	struct damon_region *r;
	struct damon_region **region_pointers;
	unsigned int nr_regions = 0;
	unsigned long total_sz = 0;

	damon_for_each_target(t, c) {
		/* there is only one target */
		region_pointers = kmalloc_array(damon_nr_regions(t),
				sizeof(*region_pointers), GFP_KERNEL);
		if (!region_pointers)
			return -ENOMEM;
		damon_for_each_region(r, t) {
			region_pointers[nr_regions++] = r;
			total_sz += r->ar.end - r->ar.start;
		}
	}
	sort(region_pointers, nr_regions, sizeof(*region_pointers),
			damon_stat_cmp_regions, NULL);
	*sorted_ptr = region_pointers;
	*nr_regions_ptr = nr_regions;
	*total_sz_ptr = total_sz;
	return 0;
}
109
110
/*
 * Update memory_idle_ms_percentiles[] from the monitoring results of @c.
 *
 * Regions are sorted by idle time (actively accessed first) and walked
 * while accumulating their sizes.  Percentile p receives the idle time, in
 * milliseconds, of the region at which p percent of the total bytes has
 * been covered.  Negative entries mean actively accessed memory (see
 * damon_stat_idletime()).  Silently returns if sorting fails.
 */
static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
{
	struct damon_region **sorted_regions, *region;
	int nr_regions;
	unsigned long total_sz, accounted_bytes = 0;
	int err, i, next_percentile = 0;

	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
			&total_sz);
	if (err)
		return;
	for (i = 0; i < nr_regions; i++) {
		region = sorted_regions[i];
		accounted_bytes += region->ar.end - region->ar.start;
		/* fill every percentile the accumulated size now covers */
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			memory_idle_ms_percentiles[next_percentile++] =
				damon_stat_idletime(region) *
				(long)c->attrs.aggr_interval / USEC_PER_MSEC;
	}
	kfree(sorted_regions);
}
131
132
static int damon_stat_damon_call_fn(void *data)
133
{
134
struct damon_ctx *c = data;
135
136
/* avoid unnecessarily frequent stat update */
137
if (time_before_eq(jiffies, damon_stat_last_refresh_jiffies +
138
msecs_to_jiffies(5 * MSEC_PER_SEC)))
139
return 0;
140
damon_stat_last_refresh_jiffies = jiffies;
141
142
aggr_interval_us = c->attrs.aggr_interval;
143
damon_stat_set_estimated_memory_bandwidth(c);
144
damon_stat_set_idletime_percentiles(c);
145
return 0;
146
}
147
148
/*
 * Allocate and configure the DAMON context for this module: physical
 * address space monitoring (DAMON_OPS_PADDR) of the biggest contiguous
 * System RAM block, with auto-tuned intervals.
 *
 * Returns the context (owned by the caller) on success, NULL on failure.
 */
static struct damon_ctx *damon_stat_build_ctx(void)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	unsigned long start = 0, end = 0;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	/*
	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;

	/*
	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
	 *
	 * NOTE(review): this writes the same goal that damon_set_attrs()
	 * already copied above and looks redundant — confirm whether
	 * damon_set_attrs() can reject or modify intervals_goal before
	 * removing it.
	 */
	ctx->attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	if (damon_set_region_biggest_system_ram_default(target, &start, &end,
				ctx->min_sz_region))
		goto free_out;
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}
199
200
/*
 * Request for repeated invocation of damon_stat_damon_call_fn() by the
 * DAMON worker; ->data is set to the running context at start time.
 */
static struct damon_call_control call_control = {
	.fn = damon_stat_damon_call_fn,
	.repeat = true,
};
204
205
static int damon_stat_start(void)
206
{
207
int err;
208
209
damon_stat_context = damon_stat_build_ctx();
210
if (!damon_stat_context)
211
return -ENOMEM;
212
err = damon_start(&damon_stat_context, 1, true);
213
if (err)
214
return err;
215
216
damon_stat_last_refresh_jiffies = jiffies;
217
call_control.data = damon_stat_context;
218
return damon_call(damon_stat_context, &call_control);
219
}
220
221
/* Stop the running DAMON context and free it. */
static void damon_stat_stop(void)
{
	damon_stop(&damon_stat_context, 1);
	damon_destroy_ctx(damon_stat_context);
}
226
227
static int damon_stat_enabled_store(
228
const char *val, const struct kernel_param *kp)
229
{
230
bool is_enabled = enabled;
231
int err;
232
233
err = kstrtobool(val, &enabled);
234
if (err)
235
return err;
236
237
if (is_enabled == enabled)
238
return 0;
239
240
if (!damon_initialized())
241
/*
242
* probably called from command line parsing (parse_args()).
243
* Cannot call damon_new_ctx(). Let damon_stat_init() handle.
244
*/
245
return 0;
246
247
if (enabled) {
248
err = damon_stat_start();
249
if (err)
250
enabled = false;
251
return err;
252
}
253
damon_stat_stop();
254
return 0;
255
}
256
257
/*
 * Module init: start monitoring if "enabled" was set (probably via the
 * command line) before DAMON itself was ready.  Clears "enabled" when
 * starting fails.
 */
static int __init damon_stat_init(void)
{
	int err;

	if (!damon_initialized())
		err = -ENOMEM;
	else if (enabled)
		err = damon_stat_start();
	else
		err = 0;

	if (err && enabled)
		enabled = false;
	return err;
}
275
276
module_init(damon_stat_init);
277
278