GitHub Repository: torvalds/linux
Path: blob/master/mm/damon/core.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Data Access Monitor
4
*
5
* Author: SeongJae Park <[email protected]>
6
*/
7
8
#define pr_fmt(fmt) "damon: " fmt
9
10
#include <linux/damon.h>
11
#include <linux/delay.h>
12
#include <linux/kthread.h>
13
#include <linux/memcontrol.h>
14
#include <linux/mm.h>
15
#include <linux/psi.h>
16
#include <linux/slab.h>
17
#include <linux/string.h>
18
#include <linux/string_choices.h>
19
20
#define CREATE_TRACE_POINTS
21
#include <trace/events/damon.h>
22
23
static DEFINE_MUTEX(damon_lock);
24
static int nr_running_ctxs;
25
static bool running_exclusive_ctxs;
26
27
static DEFINE_MUTEX(damon_ops_lock);
28
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
29
30
static struct kmem_cache *damon_region_cache __ro_after_init;
31
32
/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
33
static bool __damon_is_registered_ops(enum damon_ops_id id)
34
{
35
struct damon_operations empty_ops = {};
36
37
if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
38
return false;
39
return true;
40
}
41
42
/**
43
* damon_is_registered_ops() - Check if a given damon_operations is registered.
44
* @id: Id of the damon_operations to check if registered.
45
*
46
* Return: true if the ops is set, false otherwise.
47
*/
48
bool damon_is_registered_ops(enum damon_ops_id id)
49
{
50
bool registered;
51
52
if (id >= NR_DAMON_OPS)
53
return false;
54
mutex_lock(&damon_ops_lock);
55
registered = __damon_is_registered_ops(id);
56
mutex_unlock(&damon_ops_lock);
57
return registered;
58
}
59
60
/**
61
* damon_register_ops() - Register a monitoring operations set to DAMON.
62
* @ops: monitoring operations set to register.
63
*
64
* This function registers a monitoring operations set of valid &struct
65
* damon_operations->id so that others can find and use them later.
66
*
67
* Return: 0 on success, negative error code otherwise.
68
*/
69
int damon_register_ops(struct damon_operations *ops)
70
{
71
int err = 0;
72
73
if (ops->id >= NR_DAMON_OPS)
74
return -EINVAL;
75
76
mutex_lock(&damon_ops_lock);
77
/* Fail for already registered ops */
78
if (__damon_is_registered_ops(ops->id))
79
err = -EINVAL;
80
else
81
damon_registered_ops[ops->id] = *ops;
82
mutex_unlock(&damon_ops_lock);
83
return err;
84
}
85
86
/**
87
* damon_select_ops() - Select a monitoring operations to use with the context.
88
* @ctx: monitoring context to use the operations.
89
* @id: id of the registered monitoring operations to select.
90
*
91
* This function finds registered monitoring operations set of @id and make
92
* @ctx to use it.
93
*
94
* Return: 0 on success, negative error code otherwise.
95
*/
96
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
97
{
98
int err = 0;
99
100
if (id >= NR_DAMON_OPS)
101
return -EINVAL;
102
103
mutex_lock(&damon_ops_lock);
104
if (!__damon_is_registered_ops(id))
105
err = -EINVAL;
106
else
107
ctx->ops = damon_registered_ops[id];
108
mutex_unlock(&damon_ops_lock);
109
return err;
110
}
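/*
 * Illustrative sketch, not part of the upstream file: a minimal caller that
 * builds a context and selects a registered operations set.  DAMON_OPS_PADDR
 * is assumed here to be a valid enum damon_ops_id value provided by an
 * operations set registered elsewhere (e.g., the physical address space one).
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
 *		damon_destroy_ctx(ctx);
 *		return -EINVAL;
 *	}
 */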
111
112
/*
113
* Construct a damon_region struct
114
*
115
* Returns the pointer to the new struct on success, or NULL otherwise
116
*/
117
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
118
{
119
struct damon_region *region;
120
121
region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
122
if (!region)
123
return NULL;
124
125
region->ar.start = start;
126
region->ar.end = end;
127
region->nr_accesses = 0;
128
region->nr_accesses_bp = 0;
129
INIT_LIST_HEAD(&region->list);
130
131
region->age = 0;
132
region->last_nr_accesses = 0;
133
134
return region;
135
}
136
137
void damon_add_region(struct damon_region *r, struct damon_target *t)
138
{
139
list_add_tail(&r->list, &t->regions_list);
140
t->nr_regions++;
141
}
142
143
static void damon_del_region(struct damon_region *r, struct damon_target *t)
144
{
145
list_del(&r->list);
146
t->nr_regions--;
147
}
148
149
static void damon_free_region(struct damon_region *r)
150
{
151
kmem_cache_free(damon_region_cache, r);
152
}
153
154
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
155
{
156
damon_del_region(r, t);
157
damon_free_region(r);
158
}
159
160
/*
161
* Check whether a region is intersecting an address range
162
*
163
* Returns true if it is.
164
*/
165
static bool damon_intersect(struct damon_region *r,
166
struct damon_addr_range *re)
167
{
168
return !(r->ar.end <= re->start || re->end <= r->ar.start);
169
}
170
171
/*
172
* Fill holes in regions with new regions.
173
*/
174
static int damon_fill_regions_holes(struct damon_region *first,
175
struct damon_region *last, struct damon_target *t)
176
{
177
struct damon_region *r = first;
178
179
damon_for_each_region_from(r, t) {
180
struct damon_region *next, *newr;
181
182
if (r == last)
183
break;
184
next = damon_next_region(r);
185
if (r->ar.end != next->ar.start) {
186
newr = damon_new_region(r->ar.end, next->ar.start);
187
if (!newr)
188
return -ENOMEM;
189
damon_insert_region(newr, r, next, t);
190
}
191
}
192
return 0;
193
}
194
195
/*
196
* damon_set_regions() - Set regions of a target for given address ranges.
197
* @t: the given target.
198
* @ranges: array of new monitoring target ranges.
199
* @nr_ranges: length of @ranges.
200
* @min_sz_region: minimum region size.
201
*
202
* This function adds new regions to, or modifies existing regions of, a
203
* monitoring target to fit in specific ranges.
204
*
205
* Return: 0 on success, or negative error code otherwise.
206
*/
207
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
208
unsigned int nr_ranges, unsigned long min_sz_region)
209
{
210
struct damon_region *r, *next;
211
unsigned int i;
212
int err;
213
214
/* Remove regions which are not in the new ranges */
215
damon_for_each_region_safe(r, next, t) {
216
for (i = 0; i < nr_ranges; i++) {
217
if (damon_intersect(r, &ranges[i]))
218
break;
219
}
220
if (i == nr_ranges)
221
damon_destroy_region(r, t);
222
}
223
224
r = damon_first_region(t);
225
/* Add new regions or resize existing regions to fit in the ranges */
226
for (i = 0; i < nr_ranges; i++) {
227
struct damon_region *first = NULL, *last, *newr;
228
struct damon_addr_range *range;
229
230
range = &ranges[i];
231
/* Get the first/last regions intersecting with the range */
232
damon_for_each_region_from(r, t) {
233
if (damon_intersect(r, range)) {
234
if (!first)
235
first = r;
236
last = r;
237
}
238
if (r->ar.start >= range->end)
239
break;
240
}
241
if (!first) {
242
/* no region intersects with this range */
243
newr = damon_new_region(
244
ALIGN_DOWN(range->start,
245
min_sz_region),
246
ALIGN(range->end, min_sz_region));
247
if (!newr)
248
return -ENOMEM;
249
damon_insert_region(newr, damon_prev_region(r), r, t);
250
} else {
251
/* resize intersecting regions to fit in this range */
252
first->ar.start = ALIGN_DOWN(range->start,
253
min_sz_region);
254
last->ar.end = ALIGN(range->end, min_sz_region);
255
256
/* fill possible holes in the range */
257
err = damon_fill_regions_holes(first, last, t);
258
if (err)
259
return err;
260
}
261
}
262
return 0;
263
}
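/*
 * Illustrative sketch, not part of the upstream file: pointing an existing
 * target 't' at two monitoring ranges.  The addresses are arbitrary example
 * values, and DAMON_MIN_REGION is the default minimum region size used
 * elsewhere in this file.
 *
 *	struct damon_addr_range ranges[2] = {
 *		{ .start = 0x100000000UL, .end = 0x140000000UL },
 *		{ .start = 0x180000000UL, .end = 0x1c0000000UL },
 *	};
 *	int err = damon_set_regions(t, ranges, 2, DAMON_MIN_REGION);
 *
 *	if (err)
 *		pr_warn("failed setting regions (%d)\n", err);
 */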
264
265
struct damos_filter *damos_new_filter(enum damos_filter_type type,
266
bool matching, bool allow)
267
{
268
struct damos_filter *filter;
269
270
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
271
if (!filter)
272
return NULL;
273
filter->type = type;
274
filter->matching = matching;
275
filter->allow = allow;
276
INIT_LIST_HEAD(&filter->list);
277
return filter;
278
}
279
280
/**
281
* damos_filter_for_ops() - Return whether the filter is an ops-handled one.
282
* @type: type of the filter.
283
*
284
* Return: true if the filter of @type needs to be handled by ops layer, false
285
* otherwise.
286
*/
287
bool damos_filter_for_ops(enum damos_filter_type type)
288
{
289
switch (type) {
290
case DAMOS_FILTER_TYPE_ADDR:
291
case DAMOS_FILTER_TYPE_TARGET:
292
return false;
293
default:
294
break;
295
}
296
return true;
297
}
298
299
void damos_add_filter(struct damos *s, struct damos_filter *f)
300
{
301
if (damos_filter_for_ops(f->type))
302
list_add_tail(&f->list, &s->ops_filters);
303
else
304
list_add_tail(&f->list, &s->core_filters);
305
}
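/*
 * Illustrative sketch, not part of the upstream file: installing a core
 * layer address filter that allows only one range for a scheme 's'.  The
 * range values are arbitrary examples.
 *
 *	struct damos_filter *f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR,
 *			true, true);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range.start = 0x100000000UL;
 *	f->addr_range.end = 0x140000000UL;
 *	damos_add_filter(s, f);
 */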
306
307
static void damos_del_filter(struct damos_filter *f)
308
{
309
list_del(&f->list);
310
}
311
312
static void damos_free_filter(struct damos_filter *f)
313
{
314
kfree(f);
315
}
316
317
void damos_destroy_filter(struct damos_filter *f)
318
{
319
damos_del_filter(f);
320
damos_free_filter(f);
321
}
322
323
struct damos_quota_goal *damos_new_quota_goal(
324
enum damos_quota_goal_metric metric,
325
unsigned long target_value)
326
{
327
struct damos_quota_goal *goal;
328
329
goal = kmalloc(sizeof(*goal), GFP_KERNEL);
330
if (!goal)
331
return NULL;
332
goal->metric = metric;
333
goal->target_value = target_value;
334
INIT_LIST_HEAD(&goal->list);
335
return goal;
336
}
337
338
void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
339
{
340
list_add_tail(&g->list, &q->goals);
341
}
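/*
 * Illustrative sketch, not part of the upstream file: adding a user-input
 * feedback goal to the quota of a scheme 's'.  The 10000 target value is an
 * assumption following the "target score is always 10,000" convention
 * described at damon_feed_loop_next_input() below.
 *
 *	struct damos_quota_goal *goal = damos_new_quota_goal(
 *			DAMOS_QUOTA_USER_INPUT, 10000);
 *
 *	if (!goal)
 *		return -ENOMEM;
 *	damos_add_quota_goal(&s->quota, goal);
 */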
342
343
static void damos_del_quota_goal(struct damos_quota_goal *g)
344
{
345
list_del(&g->list);
346
}
347
348
static void damos_free_quota_goal(struct damos_quota_goal *g)
349
{
350
kfree(g);
351
}
352
353
void damos_destroy_quota_goal(struct damos_quota_goal *g)
354
{
355
damos_del_quota_goal(g);
356
damos_free_quota_goal(g);
357
}
358
359
/* initialize fields of @quota that normally API users wouldn't set */
360
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
361
{
362
quota->esz = 0;
363
quota->total_charged_sz = 0;
364
quota->total_charged_ns = 0;
365
quota->charged_sz = 0;
366
quota->charged_from = 0;
367
quota->charge_target_from = NULL;
368
quota->charge_addr_from = 0;
369
quota->esz_bp = 0;
370
return quota;
371
}
372
373
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
374
enum damos_action action,
375
unsigned long apply_interval_us,
376
struct damos_quota *quota,
377
struct damos_watermarks *wmarks,
378
int target_nid)
379
{
380
struct damos *scheme;
381
382
scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
383
if (!scheme)
384
return NULL;
385
scheme->pattern = *pattern;
386
scheme->action = action;
387
scheme->apply_interval_us = apply_interval_us;
388
/*
389
* next_apply_sis will be set when kdamond starts. While kdamond is
390
* running, it will also be updated when it is added to the DAMON context,
391
* or damon_attrs are updated.
392
*/
393
scheme->next_apply_sis = 0;
394
scheme->walk_completed = false;
395
INIT_LIST_HEAD(&scheme->core_filters);
396
INIT_LIST_HEAD(&scheme->ops_filters);
397
scheme->stat = (struct damos_stat){};
398
INIT_LIST_HEAD(&scheme->list);
399
400
scheme->quota = *(damos_quota_init(quota));
401
/* quota.goals should be separately set by caller */
402
INIT_LIST_HEAD(&scheme->quota.goals);
403
404
scheme->wmarks = *wmarks;
405
scheme->wmarks.activated = true;
406
407
scheme->migrate_dests = (struct damos_migrate_dests){};
408
scheme->target_nid = target_nid;
409
410
return scheme;
411
}
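/*
 * Illustrative sketch, not part of the upstream file: building a
 * statistics-only scheme that matches every access pattern and adding it to
 * a context 'ctx'.  The zero-initialized quota and watermarks are assumed to
 * mean "no quota" and "no watermark-based deactivation"; the pattern bounds
 * are deliberately wide open.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = UINT_MAX,
 *		.min_age_region = 0, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = {};
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_STAT, 0, &quota,
 *			&wmarks, NUMA_NO_NODE);
 *
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */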
412
413
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
414
{
415
unsigned long sample_interval = ctx->attrs.sample_interval ?
416
ctx->attrs.sample_interval : 1;
417
unsigned long apply_interval = s->apply_interval_us ?
418
s->apply_interval_us : ctx->attrs.aggr_interval;
419
420
s->next_apply_sis = ctx->passed_sample_intervals +
421
apply_interval / sample_interval;
422
}
423
424
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
425
{
426
list_add_tail(&s->list, &ctx->schemes);
427
damos_set_next_apply_sis(s, ctx);
428
}
429
430
static void damon_del_scheme(struct damos *s)
431
{
432
list_del(&s->list);
433
}
434
435
static void damon_free_scheme(struct damos *s)
436
{
437
kfree(s);
438
}
439
440
void damon_destroy_scheme(struct damos *s)
441
{
442
struct damos_quota_goal *g, *g_next;
443
struct damos_filter *f, *next;
444
445
damos_for_each_quota_goal_safe(g, g_next, &s->quota)
446
damos_destroy_quota_goal(g);
447
448
damos_for_each_core_filter_safe(f, next, s)
449
damos_destroy_filter(f);
450
451
damos_for_each_ops_filter_safe(f, next, s)
452
damos_destroy_filter(f);
453
454
kfree(s->migrate_dests.node_id_arr);
455
kfree(s->migrate_dests.weight_arr);
456
damon_del_scheme(s);
457
damon_free_scheme(s);
458
}
459
460
/*
461
* Construct a damon_target struct
462
*
463
* Returns the pointer to the new struct on success, or NULL otherwise
464
*/
465
struct damon_target *damon_new_target(void)
466
{
467
struct damon_target *t;
468
469
t = kmalloc(sizeof(*t), GFP_KERNEL);
470
if (!t)
471
return NULL;
472
473
t->pid = NULL;
474
t->nr_regions = 0;
475
INIT_LIST_HEAD(&t->regions_list);
476
INIT_LIST_HEAD(&t->list);
477
t->obsolete = false;
478
479
return t;
480
}
481
482
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
483
{
484
list_add_tail(&t->list, &ctx->adaptive_targets);
485
}
486
487
bool damon_targets_empty(struct damon_ctx *ctx)
488
{
489
return list_empty(&ctx->adaptive_targets);
490
}
491
492
static void damon_del_target(struct damon_target *t)
493
{
494
list_del(&t->list);
495
}
496
497
void damon_free_target(struct damon_target *t)
498
{
499
struct damon_region *r, *next;
500
501
damon_for_each_region_safe(r, next, t)
502
damon_free_region(r);
503
kfree(t);
504
}
505
506
void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
507
{
508
509
if (ctx && ctx->ops.cleanup_target)
510
ctx->ops.cleanup_target(t);
511
512
damon_del_target(t);
513
damon_free_target(t);
514
}
515
516
unsigned int damon_nr_regions(struct damon_target *t)
517
{
518
return t->nr_regions;
519
}
520
521
struct damon_ctx *damon_new_ctx(void)
522
{
523
struct damon_ctx *ctx;
524
525
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
526
if (!ctx)
527
return NULL;
528
529
init_completion(&ctx->kdamond_started);
530
531
ctx->attrs.sample_interval = 5 * 1000;
532
ctx->attrs.aggr_interval = 100 * 1000;
533
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
534
535
ctx->passed_sample_intervals = 0;
536
/* These will be set from kdamond_init_ctx() */
537
ctx->next_aggregation_sis = 0;
538
ctx->next_ops_update_sis = 0;
539
540
mutex_init(&ctx->kdamond_lock);
541
INIT_LIST_HEAD(&ctx->call_controls);
542
mutex_init(&ctx->call_controls_lock);
543
mutex_init(&ctx->walk_control_lock);
544
545
ctx->attrs.min_nr_regions = 10;
546
ctx->attrs.max_nr_regions = 1000;
547
548
ctx->addr_unit = 1;
549
ctx->min_sz_region = DAMON_MIN_REGION;
550
551
INIT_LIST_HEAD(&ctx->adaptive_targets);
552
INIT_LIST_HEAD(&ctx->schemes);
553
554
return ctx;
555
}
556
557
static void damon_destroy_targets(struct damon_ctx *ctx)
558
{
559
struct damon_target *t, *next_t;
560
561
damon_for_each_target_safe(t, next_t, ctx)
562
damon_destroy_target(t, ctx);
563
}
564
565
void damon_destroy_ctx(struct damon_ctx *ctx)
566
{
567
struct damos *s, *next_s;
568
569
damon_destroy_targets(ctx);
570
571
damon_for_each_scheme_safe(s, next_s, ctx)
572
damon_destroy_scheme(s);
573
574
kfree(ctx);
575
}
576
577
static bool damon_attrs_equals(const struct damon_attrs *attrs1,
578
const struct damon_attrs *attrs2)
579
{
580
const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
581
const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;
582
583
return attrs1->sample_interval == attrs2->sample_interval &&
584
attrs1->aggr_interval == attrs2->aggr_interval &&
585
attrs1->ops_update_interval == attrs2->ops_update_interval &&
586
attrs1->min_nr_regions == attrs2->min_nr_regions &&
587
attrs1->max_nr_regions == attrs2->max_nr_regions &&
588
ig1->access_bp == ig2->access_bp &&
589
ig1->aggrs == ig2->aggrs &&
590
ig1->min_sample_us == ig2->min_sample_us &&
591
ig1->max_sample_us == ig2->max_sample_us;
592
}
593
594
static unsigned int damon_age_for_new_attrs(unsigned int age,
595
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
596
{
597
return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
598
}
599
600
/* convert access ratio in bp (per 10,000) to nr_accesses */
601
static unsigned int damon_accesses_bp_to_nr_accesses(
602
unsigned int accesses_bp, struct damon_attrs *attrs)
603
{
604
return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
605
}
606
607
/*
608
* Convert nr_accesses to access ratio in bp (per 10,000).
609
*
610
* Callers should ensure attrs.aggr_interval is not zero, like
611
* damon_update_monitoring_results() does. Otherwise, a divide-by-zero would
612
* happen.
613
*/
614
static unsigned int damon_nr_accesses_to_accesses_bp(
615
unsigned int nr_accesses, struct damon_attrs *attrs)
616
{
617
return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
618
}
619
620
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
621
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
622
{
623
return damon_accesses_bp_to_nr_accesses(
624
damon_nr_accesses_to_accesses_bp(
625
nr_accesses, old_attrs),
626
new_attrs);
627
}
628
629
static void damon_update_monitoring_result(struct damon_region *r,
630
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
631
bool aggregating)
632
{
633
if (!aggregating) {
634
r->nr_accesses = damon_nr_accesses_for_new_attrs(
635
r->nr_accesses, old_attrs, new_attrs);
636
r->nr_accesses_bp = r->nr_accesses * 10000;
637
} else {
638
/*
639
* if this is called in the middle of the aggregation, reset
640
* the aggregations we made so far for this aggregation
641
* interval. In other words, make the status as if
642
* kdamond_reset_aggregated() was just called.
643
*/
644
r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
645
r->last_nr_accesses, old_attrs, new_attrs);
646
r->nr_accesses_bp = r->last_nr_accesses * 10000;
647
r->nr_accesses = 0;
648
}
649
r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
650
}
651
652
/*
653
* region->nr_accesses is the number of sampling intervals in the last
654
* aggregation interval in which access to the region was found, and region->age is
655
* the number of aggregation intervals for which its access pattern has been maintained.
656
* For this reason, the real meaning of the two fields depends on the current
657
* sampling interval and aggregation interval. This function updates
658
* ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
659
*/
660
static void damon_update_monitoring_results(struct damon_ctx *ctx,
661
struct damon_attrs *new_attrs, bool aggregating)
662
{
663
struct damon_attrs *old_attrs = &ctx->attrs;
664
struct damon_target *t;
665
struct damon_region *r;
666
667
/* if any interval is zero, simply skip the conversion */
668
if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
669
!new_attrs->sample_interval ||
670
!new_attrs->aggr_interval)
671
return;
672
673
damon_for_each_target(t, ctx)
674
damon_for_each_region(r, t)
675
damon_update_monitoring_result(
676
r, old_attrs, new_attrs, aggregating);
677
}
678
679
/*
680
* damon_valid_intervals_goal() - return if the intervals goal of @attrs is
681
* valid.
682
*/
683
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
684
{
685
struct damon_intervals_goal *goal = &attrs->intervals_goal;
686
687
/* tuning is disabled */
688
if (!goal->aggrs)
689
return true;
690
if (goal->min_sample_us > goal->max_sample_us)
691
return false;
692
if (attrs->sample_interval < goal->min_sample_us ||
693
goal->max_sample_us < attrs->sample_interval)
694
return false;
695
return true;
696
}
697
698
/**
699
* damon_set_attrs() - Set attributes for the monitoring.
700
* @ctx: monitoring context
701
* @attrs: monitoring attributes
702
*
703
* This function should be called while the kdamond is not running and an access
704
* check results aggregation is not ongoing (e.g., from damon_call()).
705
*
706
* Every time interval is in micro-seconds.
707
*
708
* Return: 0 on success, negative error code otherwise.
709
*/
710
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
711
{
712
unsigned long sample_interval = attrs->sample_interval ?
713
attrs->sample_interval : 1;
714
struct damos *s;
715
bool aggregating = ctx->passed_sample_intervals <
716
ctx->next_aggregation_sis;
717
718
if (!damon_valid_intervals_goal(attrs))
719
return -EINVAL;
720
721
if (attrs->min_nr_regions < 3)
722
return -EINVAL;
723
if (attrs->min_nr_regions > attrs->max_nr_regions)
724
return -EINVAL;
725
if (attrs->sample_interval > attrs->aggr_interval)
726
return -EINVAL;
727
728
/* calls from outside of the core don't set this. */
729
if (!attrs->aggr_samples)
730
attrs->aggr_samples = attrs->aggr_interval / sample_interval;
731
732
ctx->next_aggregation_sis = ctx->passed_sample_intervals +
733
attrs->aggr_interval / sample_interval;
734
ctx->next_ops_update_sis = ctx->passed_sample_intervals +
735
attrs->ops_update_interval / sample_interval;
736
737
damon_update_monitoring_results(ctx, attrs, aggregating);
738
ctx->attrs = *attrs;
739
740
damon_for_each_scheme(s, ctx)
741
damos_set_next_apply_sis(s, ctx);
742
743
return 0;
744
}
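/*
 * Illustrative sketch, not part of the upstream file: tuning the attributes
 * of a context 'ctx' that is not running.  All intervals are in
 * microseconds, so this asks for 5 ms sampling, 100 ms aggregation and
 * 1 minute operations updates, matching the defaults set by damon_new_ctx().
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err = damon_set_attrs(ctx, &attrs);
 *
 *	if (err)
 *		pr_warn("invalid monitoring attributes (%d)\n", err);
 */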
745
746
/**
747
* damon_set_schemes() - Set data access monitoring based operation schemes.
748
* @ctx: monitoring context
749
* @schemes: array of the schemes
750
* @nr_schemes: number of entries in @schemes
751
*
752
* This function should not be called while the kdamond of the context is
753
* running.
754
*/
755
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
756
ssize_t nr_schemes)
757
{
758
struct damos *s, *next;
759
ssize_t i;
760
761
damon_for_each_scheme_safe(s, next, ctx)
762
damon_destroy_scheme(s);
763
for (i = 0; i < nr_schemes; i++)
764
damon_add_scheme(ctx, schemes[i]);
765
}
766
767
static struct damos_quota_goal *damos_nth_quota_goal(
768
int n, struct damos_quota *q)
769
{
770
struct damos_quota_goal *goal;
771
int i = 0;
772
773
damos_for_each_quota_goal(goal, q) {
774
if (i++ == n)
775
return goal;
776
}
777
return NULL;
778
}
779
780
static void damos_commit_quota_goal_union(
781
struct damos_quota_goal *dst, struct damos_quota_goal *src)
782
{
783
switch (dst->metric) {
784
case DAMOS_QUOTA_NODE_MEM_USED_BP:
785
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
786
dst->nid = src->nid;
787
break;
788
case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
789
case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
790
dst->nid = src->nid;
791
dst->memcg_id = src->memcg_id;
792
break;
793
default:
794
break;
795
}
796
}
797
798
static void damos_commit_quota_goal(
799
struct damos_quota_goal *dst, struct damos_quota_goal *src)
800
{
801
dst->metric = src->metric;
802
dst->target_value = src->target_value;
803
if (dst->metric == DAMOS_QUOTA_USER_INPUT)
804
dst->current_value = src->current_value;
805
/* keep last_psi_total as is, since it will be updated in next cycle */
806
damos_commit_quota_goal_union(dst, src);
807
}
808
809
/**
810
* damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
811
* @dst: The commit destination DAMOS quota.
812
* @src: The commit source DAMOS quota.
813
*
814
* Copies user-specified parameters for quota goals from @src to @dst. Users
815
* should use this function for quota goals-level parameters update of running
816
* DAMON contexts, instead of manual in-place updates.
817
*
818
* This function should be called from parameters-update safe context, like
819
* damon_call().
820
*/
821
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
822
{
823
struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
824
int i = 0, j = 0;
825
826
damos_for_each_quota_goal_safe(dst_goal, next, dst) {
827
src_goal = damos_nth_quota_goal(i++, src);
828
if (src_goal)
829
damos_commit_quota_goal(dst_goal, src_goal);
830
else
831
damos_destroy_quota_goal(dst_goal);
832
}
833
damos_for_each_quota_goal_safe(src_goal, next, src) {
834
if (j++ < i)
835
continue;
836
new_goal = damos_new_quota_goal(
837
src_goal->metric, src_goal->target_value);
838
if (!new_goal)
839
return -ENOMEM;
840
damos_commit_quota_goal(new_goal, src_goal);
841
damos_add_quota_goal(dst, new_goal);
842
}
843
return 0;
844
}
845
846
static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
847
{
848
int err;
849
850
dst->reset_interval = src->reset_interval;
851
dst->ms = src->ms;
852
dst->sz = src->sz;
853
err = damos_commit_quota_goals(dst, src);
854
if (err)
855
return err;
856
dst->weight_sz = src->weight_sz;
857
dst->weight_nr_accesses = src->weight_nr_accesses;
858
dst->weight_age = src->weight_age;
859
return 0;
860
}
861
862
static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
863
{
864
struct damos_filter *filter;
865
int i = 0;
866
867
damos_for_each_core_filter(filter, s) {
868
if (i++ == n)
869
return filter;
870
}
871
return NULL;
872
}
873
874
static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
875
{
876
struct damos_filter *filter;
877
int i = 0;
878
879
damos_for_each_ops_filter(filter, s) {
880
if (i++ == n)
881
return filter;
882
}
883
return NULL;
884
}
885
886
static void damos_commit_filter_arg(
887
struct damos_filter *dst, struct damos_filter *src)
888
{
889
switch (dst->type) {
890
case DAMOS_FILTER_TYPE_MEMCG:
891
dst->memcg_id = src->memcg_id;
892
break;
893
case DAMOS_FILTER_TYPE_ADDR:
894
dst->addr_range = src->addr_range;
895
break;
896
case DAMOS_FILTER_TYPE_TARGET:
897
dst->target_idx = src->target_idx;
898
break;
899
case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
900
dst->sz_range = src->sz_range;
901
break;
902
default:
903
break;
904
}
905
}
906
907
static void damos_commit_filter(
908
struct damos_filter *dst, struct damos_filter *src)
909
{
910
dst->type = src->type;
911
dst->matching = src->matching;
912
dst->allow = src->allow;
913
damos_commit_filter_arg(dst, src);
914
}
915
916
static int damos_commit_core_filters(struct damos *dst, struct damos *src)
917
{
918
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
919
int i = 0, j = 0;
920
921
damos_for_each_core_filter_safe(dst_filter, next, dst) {
922
src_filter = damos_nth_core_filter(i++, src);
923
if (src_filter)
924
damos_commit_filter(dst_filter, src_filter);
925
else
926
damos_destroy_filter(dst_filter);
927
}
928
929
damos_for_each_core_filter_safe(src_filter, next, src) {
930
if (j++ < i)
931
continue;
932
933
new_filter = damos_new_filter(
934
src_filter->type, src_filter->matching,
935
src_filter->allow);
936
if (!new_filter)
937
return -ENOMEM;
938
damos_commit_filter_arg(new_filter, src_filter);
939
damos_add_filter(dst, new_filter);
940
}
941
return 0;
942
}
943
944
static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
945
{
946
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
947
int i = 0, j = 0;
948
949
damos_for_each_ops_filter_safe(dst_filter, next, dst) {
950
src_filter = damos_nth_ops_filter(i++, src);
951
if (src_filter)
952
damos_commit_filter(dst_filter, src_filter);
953
else
954
damos_destroy_filter(dst_filter);
955
}
956
957
damos_for_each_ops_filter_safe(src_filter, next, src) {
958
if (j++ < i)
959
continue;
960
961
new_filter = damos_new_filter(
962
src_filter->type, src_filter->matching,
963
src_filter->allow);
964
if (!new_filter)
965
return -ENOMEM;
966
damos_commit_filter_arg(new_filter, src_filter);
967
damos_add_filter(dst, new_filter);
968
}
969
return 0;
970
}
971
972
/**
973
* damos_filters_default_reject() - decide whether to reject memory that didn't
974
* match any given filter.
975
* @filters: Given DAMOS filters of a group.
976
*/
977
static bool damos_filters_default_reject(struct list_head *filters)
978
{
979
struct damos_filter *last_filter;
980
981
if (list_empty(filters))
982
return false;
983
last_filter = list_last_entry(filters, struct damos_filter, list);
984
return last_filter->allow;
985
}
986
987
static void damos_set_filters_default_reject(struct damos *s)
988
{
989
if (!list_empty(&s->ops_filters))
990
s->core_filters_default_reject = false;
991
else
992
s->core_filters_default_reject =
993
damos_filters_default_reject(&s->core_filters);
994
s->ops_filters_default_reject =
995
damos_filters_default_reject(&s->ops_filters);
996
}
997
998
static int damos_commit_dests(struct damos_migrate_dests *dst,
999
struct damos_migrate_dests *src)
1000
{
1001
if (dst->nr_dests != src->nr_dests) {
1002
kfree(dst->node_id_arr);
1003
kfree(dst->weight_arr);
1004
1005
dst->node_id_arr = kmalloc_array(src->nr_dests,
1006
sizeof(*dst->node_id_arr), GFP_KERNEL);
1007
if (!dst->node_id_arr) {
1008
dst->weight_arr = NULL;
1009
return -ENOMEM;
1010
}
1011
1012
dst->weight_arr = kmalloc_array(src->nr_dests,
1013
sizeof(*dst->weight_arr), GFP_KERNEL);
1014
if (!dst->weight_arr) {
1015
/* ->node_id_arr will be freed by scheme destruction */
1016
return -ENOMEM;
1017
}
1018
}
1019
1020
dst->nr_dests = src->nr_dests;
1021
for (int i = 0; i < src->nr_dests; i++) {
1022
dst->node_id_arr[i] = src->node_id_arr[i];
1023
dst->weight_arr[i] = src->weight_arr[i];
1024
}
1025
1026
return 0;
1027
}
1028
1029
static int damos_commit_filters(struct damos *dst, struct damos *src)
1030
{
1031
int err;
1032
1033
err = damos_commit_core_filters(dst, src);
1034
if (err)
1035
return err;
1036
err = damos_commit_ops_filters(dst, src);
1037
if (err)
1038
return err;
1039
damos_set_filters_default_reject(dst);
1040
return 0;
1041
}
1042
1043
static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
1044
{
1045
struct damos *s;
1046
int i = 0;
1047
1048
damon_for_each_scheme(s, ctx) {
1049
if (i++ == n)
1050
return s;
1051
}
1052
return NULL;
1053
}
1054
1055
static int damos_commit(struct damos *dst, struct damos *src)
1056
{
1057
int err;
1058
1059
dst->pattern = src->pattern;
1060
dst->action = src->action;
1061
dst->apply_interval_us = src->apply_interval_us;
1062
1063
err = damos_commit_quota(&dst->quota, &src->quota);
1064
if (err)
1065
return err;
1066
1067
dst->wmarks = src->wmarks;
1068
dst->target_nid = src->target_nid;
1069
1070
err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
1071
if (err)
1072
return err;
1073
1074
err = damos_commit_filters(dst, src);
1075
return err;
1076
}
1077
1078
static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
1079
{
1080
struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
1081
int i = 0, j = 0, err;
1082
1083
damon_for_each_scheme_safe(dst_scheme, next, dst) {
1084
src_scheme = damon_nth_scheme(i++, src);
1085
if (src_scheme) {
1086
err = damos_commit(dst_scheme, src_scheme);
1087
if (err)
1088
return err;
1089
} else {
1090
damon_destroy_scheme(dst_scheme);
1091
}
1092
}
1093
1094
damon_for_each_scheme_safe(src_scheme, next, src) {
1095
if (j++ < i)
1096
continue;
1097
new_scheme = damon_new_scheme(&src_scheme->pattern,
1098
src_scheme->action,
1099
src_scheme->apply_interval_us,
1100
&src_scheme->quota, &src_scheme->wmarks,
1101
NUMA_NO_NODE);
1102
if (!new_scheme)
1103
return -ENOMEM;
1104
err = damos_commit(new_scheme, src_scheme);
1105
if (err) {
1106
damon_destroy_scheme(new_scheme);
1107
return err;
1108
}
1109
damon_add_scheme(dst, new_scheme);
1110
}
1111
return 0;
1112
}
1113
1114
static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
1115
{
1116
struct damon_target *t;
1117
int i = 0;
1118
1119
damon_for_each_target(t, ctx) {
1120
if (i++ == n)
1121
return t;
1122
}
1123
return NULL;
1124
}
1125
1126
/*
1127
* The caller should ensure the regions of @src are
1128
* 1. valid (end >= start) and
1129
* 2. sorted by starting address.
1130
*
1131
* If @src has no region, @dst keeps current regions.
1132
*/
1133
static int damon_commit_target_regions(struct damon_target *dst,
1134
struct damon_target *src, unsigned long src_min_sz_region)
1135
{
1136
struct damon_region *src_region;
1137
struct damon_addr_range *ranges;
1138
int i = 0, err;
1139
1140
damon_for_each_region(src_region, src)
1141
i++;
1142
if (!i)
1143
return 0;
1144
1145
ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1146
if (!ranges)
1147
return -ENOMEM;
1148
i = 0;
1149
damon_for_each_region(src_region, src)
1150
ranges[i++] = src_region->ar;
1151
err = damon_set_regions(dst, ranges, i, src_min_sz_region);
1152
kfree(ranges);
1153
return err;
1154
}
1155
1156
static int damon_commit_target(
1157
struct damon_target *dst, bool dst_has_pid,
1158
struct damon_target *src, bool src_has_pid,
1159
unsigned long src_min_sz_region)
1160
{
1161
int err;
1162
1163
err = damon_commit_target_regions(dst, src, src_min_sz_region);
1164
if (err)
1165
return err;
1166
if (dst_has_pid)
1167
put_pid(dst->pid);
1168
if (src_has_pid)
1169
get_pid(src->pid);
1170
dst->pid = src->pid;
1171
return 0;
1172
}
1173
1174
static int damon_commit_targets(
1175
struct damon_ctx *dst, struct damon_ctx *src)
1176
{
1177
struct damon_target *dst_target, *next, *src_target, *new_target;
1178
int i = 0, j = 0, err;
1179
1180
damon_for_each_target_safe(dst_target, next, dst) {
1181
src_target = damon_nth_target(i++, src);
1182
/*
1183
* If src target is obsolete, do not commit the parameters to
1184
* the dst target, and further remove the dst target.
1185
*/
1186
if (src_target && !src_target->obsolete) {
1187
err = damon_commit_target(
1188
dst_target, damon_target_has_pid(dst),
1189
src_target, damon_target_has_pid(src),
1190
src->min_sz_region);
1191
if (err)
1192
return err;
1193
} else {
1194
struct damos *s;
1195
1196
damon_destroy_target(dst_target, dst);
1197
damon_for_each_scheme(s, dst) {
1198
if (s->quota.charge_target_from == dst_target) {
1199
s->quota.charge_target_from = NULL;
1200
s->quota.charge_addr_from = 0;
1201
}
1202
}
1203
}
1204
}
1205
1206
damon_for_each_target_safe(src_target, next, src) {
1207
if (j++ < i)
1208
continue;
1209
/* target to remove has no matching dst */
1210
if (src_target->obsolete)
1211
return -EINVAL;
1212
new_target = damon_new_target();
1213
if (!new_target)
1214
return -ENOMEM;
1215
err = damon_commit_target(new_target, false,
1216
src_target, damon_target_has_pid(src),
1217
src->min_sz_region);
1218
if (err) {
1219
damon_destroy_target(new_target, NULL);
1220
return err;
1221
}
1222
damon_add_target(dst, new_target);
1223
}
1224
return 0;
1225
}
1226
1227
/**
1228
* damon_commit_ctx() - Commit parameters of a DAMON context to another.
1229
* @dst: The commit destination DAMON context.
1230
* @src: The commit source DAMON context.
1231
*
1232
* This function copies user-specified parameters from @src to @dst and updates
1233
* the internal status and results accordingly. Users should use this function
1234
* for context-level parameters update of a running context, instead of manual
1235
* in-place updates.
1236
*
1237
* This function should be called from parameters-update safe context, like
1238
* damon_call().
1239
*/
1240
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
1241
{
1242
int err;
1243
1244
err = damon_commit_schemes(dst, src);
1245
if (err)
1246
return err;
1247
err = damon_commit_targets(dst, src);
1248
if (err)
1249
return err;
1250
/*
1251
* schemes and targets should be updated first, since
1252
* 1. damon_set_attrs() updates monitoring results of targets and
1253
* next_apply_sis of schemes, and
1254
* 2. ops update should be done after pid handling is done (target
1255
* committing require putting pids).
1256
*/
1257
if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
1258
err = damon_set_attrs(dst, &src->attrs);
1259
if (err)
1260
return err;
1261
}
1262
dst->ops = src->ops;
1263
dst->addr_unit = src->addr_unit;
1264
dst->min_sz_region = src->min_sz_region;
1265
1266
return 0;
1267
}
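/*
 * Illustrative sketch, not part of the upstream file: committing a
 * parameters-only context 'param_ctx' to a running context 'running_ctx'
 * from damon_call() context, as the kernel-doc above recommends.  The
 * int (*)(void *) callback signature of damon_call_control->fn is an
 * assumption for illustration.
 *
 *	struct commit_args {
 *		struct damon_ctx *running_ctx;
 *		struct damon_ctx *param_ctx;
 *	};
 *
 *	static int commit_params_fn(void *data)
 *	{
 *		struct commit_args *args = data;
 *
 *		return damon_commit_ctx(args->running_ctx, args->param_ctx);
 *	}
 *
 *	...
 *	struct commit_args args = {
 *		.running_ctx = running_ctx,
 *		.param_ctx = param_ctx,
 *	};
 *	struct damon_call_control control = {
 *		.fn = commit_params_fn,
 *		.data = &args,
 *	};
 *
 *	return damon_call(running_ctx, &control);
 */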
1268
1269
/**
1270
* damon_nr_running_ctxs() - Return number of currently running contexts.
1271
*/
1272
int damon_nr_running_ctxs(void)
1273
{
1274
int nr_ctxs;
1275
1276
mutex_lock(&damon_lock);
1277
nr_ctxs = nr_running_ctxs;
1278
mutex_unlock(&damon_lock);
1279
1280
return nr_ctxs;
1281
}
1282
1283
/* Returns the size upper limit for each monitoring region */
1284
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1285
{
1286
struct damon_target *t;
1287
struct damon_region *r;
1288
unsigned long sz = 0;
1289
1290
damon_for_each_target(t, ctx) {
1291
damon_for_each_region(r, t)
1292
sz += damon_sz_region(r);
1293
}
1294
1295
if (ctx->attrs.min_nr_regions)
1296
sz /= ctx->attrs.min_nr_regions;
1297
if (sz < ctx->min_sz_region)
1298
sz = ctx->min_sz_region;
1299
1300
return sz;
1301
}
1302
1303
static int kdamond_fn(void *data);
1304
1305
/*
1306
* __damon_start() - Starts monitoring with given context.
1307
* @ctx: monitoring context
1308
*
1309
* This function should be called while damon_lock is held.
1310
*
1311
* Return: 0 on success, negative error code otherwise.
1312
*/
1313
static int __damon_start(struct damon_ctx *ctx)
1314
{
1315
int err = -EBUSY;
1316
1317
mutex_lock(&ctx->kdamond_lock);
1318
if (!ctx->kdamond) {
1319
err = 0;
1320
reinit_completion(&ctx->kdamond_started);
1321
ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1322
nr_running_ctxs);
1323
if (IS_ERR(ctx->kdamond)) {
1324
err = PTR_ERR(ctx->kdamond);
1325
ctx->kdamond = NULL;
1326
} else {
1327
wait_for_completion(&ctx->kdamond_started);
1328
}
1329
}
1330
mutex_unlock(&ctx->kdamond_lock);
1331
1332
return err;
1333
}
1334
1335
/**
1336
* damon_start() - Starts monitoring for a given group of contexts.
1337
* @ctxs: an array of the pointers for contexts to start monitoring
1338
* @nr_ctxs: size of @ctxs
1339
* @exclusive: exclusiveness of this contexts group
1340
*
1341
* This function starts a group of monitoring threads for a group of monitoring
1342
* contexts. One thread per context is created and run in parallel. The
1343
* caller should handle synchronization between the threads by itself. If
1344
* @exclusive is true and a group of threads created by another
1345
* 'damon_start()' call is currently running, this function does nothing but
1346
* returns -EBUSY.
1347
*
1348
* Return: 0 on success, negative error code otherwise.
1349
*/
1350
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1351
{
1352
int i;
1353
int err = 0;
1354
1355
mutex_lock(&damon_lock);
1356
if ((exclusive && nr_running_ctxs) ||
1357
(!exclusive && running_exclusive_ctxs)) {
1358
mutex_unlock(&damon_lock);
1359
return -EBUSY;
1360
}
1361
1362
for (i = 0; i < nr_ctxs; i++) {
1363
err = __damon_start(ctxs[i]);
1364
if (err)
1365
break;
1366
nr_running_ctxs++;
1367
}
1368
if (exclusive && nr_running_ctxs)
1369
running_exclusive_ctxs = true;
1370
mutex_unlock(&damon_lock);
1371
1372
return err;
1373
}
1374
1375
/*
1376
* __damon_stop() - Stops monitoring of a given context.
1377
* @ctx: monitoring context
1378
*
1379
* Return: 0 on success, negative error code otherwise.
1380
*/
1381
static int __damon_stop(struct damon_ctx *ctx)
1382
{
1383
struct task_struct *tsk;
1384
1385
mutex_lock(&ctx->kdamond_lock);
1386
tsk = ctx->kdamond;
1387
if (tsk) {
1388
get_task_struct(tsk);
1389
mutex_unlock(&ctx->kdamond_lock);
1390
kthread_stop_put(tsk);
1391
return 0;
1392
}
1393
mutex_unlock(&ctx->kdamond_lock);
1394
1395
return -EPERM;
1396
}
1397
1398
/**
1399
* damon_stop() - Stops monitoring for a given group of contexts.
1400
* @ctxs: an array of the pointers for contexts to stop monitoring
1401
* @nr_ctxs: size of @ctxs
1402
*
1403
* Return: 0 on success, negative error code otherwise.
1404
*/
1405
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1406
{
1407
int i, err = 0;
1408
1409
for (i = 0; i < nr_ctxs; i++) {
1410
/* nr_running_ctxs is decremented in kdamond_fn */
1411
err = __damon_stop(ctxs[i]);
1412
if (err)
1413
break;
1414
}
1415
return err;
1416
}
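/*
 * Illustrative sketch, not part of the upstream file: running a single
 * context exclusively and stopping it later.  'ctx' is assumed to be a
 * fully configured context.
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *	int err = damon_start(ctxs, 1, true);
 *
 *	if (err)
 *		return err;
 *	... (monitoring runs) ...
 *	err = damon_stop(ctxs, 1);
 */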
1417
1418
/**
1419
* damon_is_running() - Returns whether a given DAMON context is running.
1420
* @ctx: The DAMON context to see if running.
1421
*
1422
* Return: true if @ctx is running, false otherwise.
1423
*/
1424
bool damon_is_running(struct damon_ctx *ctx)
1425
{
1426
bool running;
1427
1428
mutex_lock(&ctx->kdamond_lock);
1429
running = ctx->kdamond != NULL;
1430
mutex_unlock(&ctx->kdamond_lock);
1431
return running;
1432
}
1433
1434
/*
1435
* damon_call_handle_inactive_ctx() - handle a DAMON call request that was added to
1436
* an inactive context.
1437
* @ctx: The inactive DAMON context.
1438
* @control: Control variable of the call request.
1439
*
1440
* This function is called in the case that @control was added to @ctx but @ctx is
1441
* not running (inactive). See if @ctx handled @control or not, and clean up
1442
* @control if it was not handled.
1443
*
1444
* Returns 0 if @control was handled by @ctx, negative error code otherwise.
1445
*/
1446
static int damon_call_handle_inactive_ctx(
1447
struct damon_ctx *ctx, struct damon_call_control *control)
1448
{
1449
struct damon_call_control *c;
1450
1451
mutex_lock(&ctx->call_controls_lock);
1452
list_for_each_entry(c, &ctx->call_controls, list) {
1453
if (c == control) {
1454
list_del(&control->list);
1455
mutex_unlock(&ctx->call_controls_lock);
1456
return -EINVAL;
1457
}
1458
}
1459
mutex_unlock(&ctx->call_controls_lock);
1460
return 0;
1461
}
1462
1463
/**
1464
* damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1465
* @ctx: DAMON context to call the function for.
1466
* @control: Control variable of the call request.
1467
*
1468
* Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1469
* argument data that respectively passed via &damon_call_control->fn and
1470
* &damon_call_control->data of @control. If &damon_call_control->repeat of
1471
* @control is unset, further wait until the kdamond finishes handling of the
1472
* request. Otherwise, return as soon as the request is made.
1473
*
1474
* The kdamond executes the function with the argument in the main loop, just
1475
* after a sampling of the iteration is finished. The function can hence
1476
* safely access the internal data of the &struct damon_ctx without additional
1477
* synchronization. The return value of the function will be saved in
1478
* &damon_call_control->return_code.
1479
*
1480
* Return: 0 on success, negative error code otherwise.
1481
*/
1482
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1483
{
1484
if (!control->repeat)
1485
init_completion(&control->completion);
1486
control->canceled = false;
1487
INIT_LIST_HEAD(&control->list);
1488
1489
mutex_lock(&ctx->call_controls_lock);
1490
list_add_tail(&control->list, &ctx->call_controls);
1491
mutex_unlock(&ctx->call_controls_lock);
1492
if (!damon_is_running(ctx))
1493
return damon_call_handle_inactive_ctx(ctx, control);
1494
if (control->repeat)
1495
return 0;
1496
wait_for_completion(&control->completion);
1497
if (control->canceled)
1498
return -ECANCELED;
1499
return 0;
1500
}
1501
1502
/**
1503
* damos_walk() - Invoke a given function while DAMOS walks regions.
1504
* @ctx: DAMON context to call the functions for.
1505
* @control: Control variable of the walk request.
1506
*
1507
* Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1508
* that the kdamond will apply DAMOS action to, and wait until the kdamond
1509
* finishes handling of the request.
1510
*
1511
* The kdamond executes the given function in the main loop, for each region
1512
* just after it applied any DAMOS actions of @ctx to it. The invocation is
1513
* made only within one &damos->apply_interval_us since damos_walk()
1514
* invocation, for each scheme. The given callback function can hence safely
1515
* access the internal data of &struct damon_ctx and &struct damon_region that
1516
* each of the schemes will apply the action to for the next interval, without
1517
* additional synchronizations against the kdamond. If every scheme of @ctx
1518
* passed at least one &damos->apply_interval_us, kdamond marks the request as
1519
* completed so that damos_walk() can wakeup and return.
1520
*
1521
* Return: 0 on success, negative error code otherwise.
1522
*/
1523
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1524
{
1525
init_completion(&control->completion);
1526
control->canceled = false;
1527
mutex_lock(&ctx->walk_control_lock);
1528
if (ctx->walk_control) {
1529
mutex_unlock(&ctx->walk_control_lock);
1530
return -EBUSY;
1531
}
1532
ctx->walk_control = control;
1533
mutex_unlock(&ctx->walk_control_lock);
1534
if (!damon_is_running(ctx))
1535
return -EINVAL;
1536
wait_for_completion(&control->completion);
1537
if (control->canceled)
1538
return -ECANCELED;
1539
return 0;
1540
}
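/*
 * Illustrative sketch, not part of the upstream file: counting the regions
 * that the schemes of a running context 'ctx' will apply their actions to.
 * The callback's argument order follows the walk_fn invocation in
 * damos_walk_call_walk() below; its void return type is an assumption.
 *
 *	static void count_regions_fn(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		unsigned long *nr_regions = data;
 *
 *		(*nr_regions)++;
 *	}
 *
 *	...
 *	unsigned long nr_regions = 0;
 *	struct damos_walk_control control = {
 *		.walk_fn = count_regions_fn,
 *		.data = &nr_regions,
 *	};
 *	int err = damos_walk(ctx, &control);
 */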
1541
1542
/*
1543
* Warn about and fix corrupted ->nr_accesses[_bp], for investigation and to prevent
1544
* the problem being propagated.
1545
*/
1546
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
1547
{
1548
if (r->nr_accesses_bp == r->nr_accesses * 10000)
1549
return;
1550
WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
1551
r->nr_accesses_bp, r->nr_accesses);
1552
r->nr_accesses_bp = r->nr_accesses * 10000;
1553
}
1554
1555
/*
1556
* Reset the aggregated monitoring results ('nr_accesses' of each region).
1557
*/
1558
static void kdamond_reset_aggregated(struct damon_ctx *c)
1559
{
1560
struct damon_target *t;
1561
unsigned int ti = 0; /* target's index */
1562
1563
damon_for_each_target(t, c) {
1564
struct damon_region *r;
1565
1566
damon_for_each_region(r, t) {
1567
trace_damon_aggregated(ti, r, damon_nr_regions(t));
1568
damon_warn_fix_nr_accesses_corruption(r);
1569
r->last_nr_accesses = r->nr_accesses;
1570
r->nr_accesses = 0;
1571
}
1572
ti++;
1573
}
1574
}
1575
1576
static unsigned long damon_get_intervals_score(struct damon_ctx *c)
1577
{
1578
struct damon_target *t;
1579
struct damon_region *r;
1580
unsigned long sz_region, max_access_events = 0, access_events = 0;
1581
unsigned long target_access_events;
1582
unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
1583
1584
damon_for_each_target(t, c) {
1585
damon_for_each_region(r, t) {
1586
sz_region = damon_sz_region(r);
1587
max_access_events += sz_region * c->attrs.aggr_samples;
1588
access_events += sz_region * r->nr_accesses;
1589
}
1590
}
1591
target_access_events = max_access_events * goal_bp / 10000;
1592
target_access_events = target_access_events ? : 1;
1593
return access_events * 10000 / target_access_events;
1594
}
1595
1596
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1597
unsigned long score);
1598
1599
static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
1600
{
1601
unsigned long score_bp, adaptation_bp;
1602
1603
score_bp = damon_get_intervals_score(c);
1604
adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
1605
10000;
1606
/*
1607
* adaptation_bp ranges from 1 to 20,000. Avoid too rapid reduction of
1608
* the intervals by rescaling [1, 10,000] to [5,000, 10,000].
1609
*/
1610
if (adaptation_bp <= 10000)
1611
adaptation_bp = 5000 + adaptation_bp / 2;
1612
return adaptation_bp;
1613
}
1614
1615
static void kdamond_tune_intervals(struct damon_ctx *c)
1616
{
1617
unsigned long adaptation_bp;
1618
struct damon_attrs new_attrs;
1619
struct damon_intervals_goal *goal;
1620
1621
adaptation_bp = damon_get_intervals_adaptation_bp(c);
1622
if (adaptation_bp == 10000)
1623
return;
1624
1625
new_attrs = c->attrs;
1626
goal = &c->attrs.intervals_goal;
1627
new_attrs.sample_interval = min(goal->max_sample_us,
1628
c->attrs.sample_interval * adaptation_bp / 10000);
1629
new_attrs.sample_interval = max(goal->min_sample_us,
1630
new_attrs.sample_interval);
1631
new_attrs.aggr_interval = new_attrs.sample_interval *
1632
c->attrs.aggr_samples;
1633
trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
1634
damon_set_attrs(c, &new_attrs);
1635
}
1636
1637
static void damon_split_region_at(struct damon_target *t,
1638
struct damon_region *r, unsigned long sz_r);
1639
1640
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1641
{
1642
unsigned long sz;
1643
unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1644
1645
sz = damon_sz_region(r);
1646
return s->pattern.min_sz_region <= sz &&
1647
sz <= s->pattern.max_sz_region &&
1648
s->pattern.min_nr_accesses <= nr_accesses &&
1649
nr_accesses <= s->pattern.max_nr_accesses &&
1650
s->pattern.min_age_region <= r->age &&
1651
r->age <= s->pattern.max_age_region;
1652
}
1653
1654
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1655
struct damon_region *r, struct damos *s)
1656
{
1657
bool ret = __damos_valid_target(r, s);
1658
1659
if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1660
return ret;
1661
1662
return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1663
}
1664
1665
/*
1666
* damos_skip_charged_region() - Check if the given region or starting part of
1667
* it is already charged for the DAMOS quota.
1668
* @t: The target of the region.
1669
* @rp: The pointer to the region.
1670
* @s: The scheme to be applied.
1671
* @min_sz_region: minimum region size.
1672
*
1673
* If a quota of a scheme has been exceeded in a quota charge window, the scheme's
1674
* action would be applied to only a part of the target access pattern fulfilling
1675
* regions. To avoid applying the scheme action to only already applied
1676
* regions, DAMON skips applying the scheme action to the regions that were charged
1677
* in the previous charge window.
1678
*
1679
* This function checks if a given region should be skipped or not for the
1680
* reason. If only the starting part of the region has previously been charged,
1681
* this function splits the region into two so that the second one covers the
1682
* area that was not charged in the previous charge window and saves the second
1683
* region in *rp and returns false, so that the caller can apply DAMON action
1684
* to the second one.
1685
*
1686
* Return: true if the region should be entirely skipped, false otherwise.
1687
*/
1688
static bool damos_skip_charged_region(struct damon_target *t,
1689
struct damon_region **rp, struct damos *s, unsigned long min_sz_region)
1690
{
1691
struct damon_region *r = *rp;
1692
struct damos_quota *quota = &s->quota;
1693
unsigned long sz_to_skip;
1694
1695
/* Skip previously charged regions */
1696
if (quota->charge_target_from) {
1697
if (t != quota->charge_target_from)
1698
return true;
1699
if (r == damon_last_region(t)) {
1700
quota->charge_target_from = NULL;
1701
quota->charge_addr_from = 0;
1702
return true;
1703
}
1704
if (quota->charge_addr_from &&
1705
r->ar.end <= quota->charge_addr_from)
1706
return true;
1707
1708
if (quota->charge_addr_from && r->ar.start <
1709
quota->charge_addr_from) {
1710
sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1711
r->ar.start, min_sz_region);
1712
if (!sz_to_skip) {
1713
if (damon_sz_region(r) <= min_sz_region)
1714
return true;
1715
sz_to_skip = min_sz_region;
1716
}
1717
damon_split_region_at(t, r, sz_to_skip);
1718
r = damon_next_region(r);
1719
*rp = r;
1720
}
1721
quota->charge_target_from = NULL;
1722
quota->charge_addr_from = 0;
1723
}
1724
return false;
1725
}
1726
1727
static void damos_update_stat(struct damos *s,
1728
unsigned long sz_tried, unsigned long sz_applied,
1729
unsigned long sz_ops_filter_passed)
1730
{
1731
s->stat.nr_tried++;
1732
s->stat.sz_tried += sz_tried;
1733
if (sz_applied)
1734
s->stat.nr_applied++;
1735
s->stat.sz_applied += sz_applied;
1736
s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1737
}
1738
1739
static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1740
struct damon_region *r, struct damos_filter *filter,
1741
unsigned long min_sz_region)
1742
{
1743
bool matched = false;
1744
struct damon_target *ti;
1745
int target_idx = 0;
1746
unsigned long start, end;
1747
1748
switch (filter->type) {
1749
case DAMOS_FILTER_TYPE_TARGET:
1750
damon_for_each_target(ti, ctx) {
1751
if (ti == t)
1752
break;
1753
target_idx++;
1754
}
1755
matched = target_idx == filter->target_idx;
1756
break;
1757
case DAMOS_FILTER_TYPE_ADDR:
1758
start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
1759
end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);
1760
1761
/* inside the range */
1762
if (start <= r->ar.start && r->ar.end <= end) {
1763
matched = true;
1764
break;
1765
}
1766
/* outside of the range */
1767
if (r->ar.end <= start || end <= r->ar.start) {
1768
matched = false;
1769
break;
1770
}
1771
/* start before the range and overlap */
1772
if (r->ar.start < start) {
1773
damon_split_region_at(t, r, start - r->ar.start);
1774
matched = false;
1775
break;
1776
}
1777
/* start inside the range */
1778
damon_split_region_at(t, r, end - r->ar.start);
1779
matched = true;
1780
break;
1781
default:
1782
return false;
1783
}
1784
1785
return matched == filter->matching;
1786
}
1787
1788
static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1789
struct damon_region *r, struct damos *s)
1790
{
1791
struct damos_filter *filter;
1792
1793
s->core_filters_allowed = false;
1794
damos_for_each_core_filter(filter, s) {
1795
if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
1796
if (filter->allow)
1797
s->core_filters_allowed = true;
1798
return !filter->allow;
1799
}
1800
}
1801
return s->core_filters_default_reject;
1802
}
1803
1804
/*
1805
* damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1806
* @ctx: The context of &damon_ctx->walk_control.
1807
* @t: The monitoring target of @r that @s will be applied to.
1808
* @r: The region of @t that @s will be applied to.
1809
* @s: The scheme of @ctx that will be applied to @r.
1810
*
1811
* This function is called from kdamond whenever it has asked the operations set to
1812
* apply a DAMOS scheme action to a region. If a DAMOS walk request is
1813
* installed by damos_walk() and not yet uninstalled, invoke it.
1814
*/
1815
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1816
struct damon_region *r, struct damos *s,
1817
unsigned long sz_filter_passed)
1818
{
1819
struct damos_walk_control *control;
1820
1821
if (s->walk_completed)
1822
return;
1823
1824
control = ctx->walk_control;
1825
if (!control)
1826
return;
1827
1828
control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1829
}
1830
1831
/*
1832
* damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1833
* @ctx: The context of &damon_ctx->walk_control.
1834
* @s: A scheme of @ctx that all walks are now done.
1835
*
1836
* This function is called when kdamond has finished applying the action of a DAMOS
1837
* scheme to all regions that are eligible for the given &damos->apply_interval_us.
1838
* If every scheme of @ctx including @s now finished walking for at least one
1839
* &damos->apply_interval_us, this function marks the handling of the given
1840
* DAMOS walk request as done, so that damos_walk() can wake up and return.
1841
*/
1842
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1843
{
1844
struct damos *siter;
1845
struct damos_walk_control *control;
1846
1847
control = ctx->walk_control;
1848
if (!control)
1849
return;
1850
1851
s->walk_completed = true;
1852
/* if all schemes completed, signal completion to walker */
1853
damon_for_each_scheme(siter, ctx) {
1854
if (!siter->walk_completed)
1855
return;
1856
}
1857
damon_for_each_scheme(siter, ctx)
1858
siter->walk_completed = false;
1859
1860
complete(&control->completion);
1861
ctx->walk_control = NULL;
1862
}
1863
1864
/*
1865
* damos_walk_cancel() - Cancel the current DAMOS walk request.
1866
* @ctx: The context of &damon_ctx->walk_control.
1867
*
1868
* This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
1869
* walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1870
* is already out of the main loop and therefore going to be terminated, and hence
1871
* cannot continue the walks. This function therefore marks the walk request
1872
* as canceled, so that damos_walk() can wake up and return.
1873
*/
1874
static void damos_walk_cancel(struct damon_ctx *ctx)
1875
{
1876
struct damos_walk_control *control;
1877
1878
mutex_lock(&ctx->walk_control_lock);
1879
control = ctx->walk_control;
1880
mutex_unlock(&ctx->walk_control_lock);
1881
1882
if (!control)
1883
return;
1884
control->canceled = true;
1885
complete(&control->completion);
1886
mutex_lock(&ctx->walk_control_lock);
1887
ctx->walk_control = NULL;
1888
mutex_unlock(&ctx->walk_control_lock);
1889
}
1890
1891
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1892
struct damon_region *r, struct damos *s)
1893
{
1894
struct damos_quota *quota = &s->quota;
1895
unsigned long sz = damon_sz_region(r);
1896
struct timespec64 begin, end;
1897
unsigned long sz_applied = 0;
1898
unsigned long sz_ops_filter_passed = 0;
1899
/*
1900
* We plan to support multiple context per kdamond, as DAMON sysfs
1901
* implies with 'nr_contexts' file. Nevertheless, only single context
1902
* per kdamond is supported for now. So, we can simply use '0' context
1903
* index here.
1904
*/
1905
unsigned int cidx = 0;
1906
struct damos *siter; /* schemes iterator */
1907
unsigned int sidx = 0;
1908
struct damon_target *titer; /* targets iterator */
1909
unsigned int tidx = 0;
1910
bool do_trace = false;
1911
1912
/* get indices for trace_damos_before_apply() */
1913
if (trace_damos_before_apply_enabled()) {
1914
damon_for_each_scheme(siter, c) {
1915
if (siter == s)
1916
break;
1917
sidx++;
1918
}
1919
damon_for_each_target(titer, c) {
1920
if (titer == t)
1921
break;
1922
tidx++;
1923
}
1924
do_trace = true;
1925
}
1926
1927
if (c->ops.apply_scheme) {
1928
if (quota->esz && quota->charged_sz + sz > quota->esz) {
1929
sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1930
c->min_sz_region);
1931
if (!sz)
1932
goto update_stat;
1933
damon_split_region_at(t, r, sz);
1934
}
1935
if (damos_filter_out(c, t, r, s))
1936
return;
1937
ktime_get_coarse_ts64(&begin);
1938
trace_damos_before_apply(cidx, sidx, tidx, r,
1939
damon_nr_regions(t), do_trace);
1940
sz_applied = c->ops.apply_scheme(c, t, r, s,
1941
&sz_ops_filter_passed);
1942
damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1943
ktime_get_coarse_ts64(&end);
1944
quota->total_charged_ns += timespec64_to_ns(&end) -
1945
timespec64_to_ns(&begin);
1946
quota->charged_sz += sz;
1947
if (quota->esz && quota->charged_sz >= quota->esz) {
1948
quota->charge_target_from = t;
1949
quota->charge_addr_from = r->ar.end + 1;
1950
}
1951
}
1952
if (s->action != DAMOS_STAT)
1953
r->age = 0;
1954
1955
update_stat:
1956
damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1957
}
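/*
* Worked example of the quota-driven split above (illustrative numbers, not
* from the original source): with quota->esz == 64 KiB, quota->charged_sz ==
* 56 KiB and c->min_sz_region == 4 KiB, a 20 KiB region is trimmed to
* ALIGN_DOWN(64 KiB - 56 KiB, 4 KiB) == 8 KiB before the action is applied,
* and the remaining 12 KiB stays as a separate region that can be handled
* once the quota is replenished in a later charge window.
*/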
1958
1959
static void damon_do_apply_schemes(struct damon_ctx *c,
1960
struct damon_target *t,
1961
struct damon_region *r)
1962
{
1963
struct damos *s;
1964
1965
damon_for_each_scheme(s, c) {
1966
struct damos_quota *quota = &s->quota;
1967
1968
if (c->passed_sample_intervals < s->next_apply_sis)
1969
continue;
1970
1971
if (!s->wmarks.activated)
1972
continue;
1973
1974
/* Check the quota */
1975
if (quota->esz && quota->charged_sz >= quota->esz)
1976
continue;
1977
1978
if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
1979
continue;
1980
1981
if (!damos_valid_target(c, t, r, s))
1982
continue;
1983
1984
damos_apply_scheme(c, t, r, s);
1985
}
1986
}
1987
1988
/*
1989
* damon_feed_loop_next_input() - get next input to achieve a target score.
1990
* @last_input:	The last input.
1991
* @score:	Current score achieved with @last_input.
1992
*
1993
* Calculate next input to achieve the target score, based on the last input
1994
* and current score. Assuming the input and the score are positively
1995
* proportional, calculate how much compensation should be added to or
1996
* subtracted from the last input as a proportion of the last input.  Avoid the
1997
* next input always collapsing to zero by keeping it non-zero.  In short form
1998
* (assuming support of float and signed calculations), the algorithm is as
1999
* below.
2000
*
2001
* next_input = max(last_input * ((goal - current) / goal + 1), 1)
2002
*
2003
* For simple implementation, we assume the target score is always 10,000. The
2004
* caller should adjust @score for this.
2005
*
2006
* Returns the next input that is assumed to achieve the target score.
2007
*/
2008
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
2009
unsigned long score)
2010
{
2011
const unsigned long goal = 10000;
2012
/* Set minimum input as 10000 to avoid the compensation becoming zero */
2013
const unsigned long min_input = 10000;
2014
unsigned long score_goal_diff, compensation;
2015
bool over_achieving = score > goal;
2016
2017
if (score == goal)
2018
return last_input;
2019
if (score >= goal * 2)
2020
return min_input;
2021
2022
if (over_achieving)
2023
score_goal_diff = score - goal;
2024
else
2025
score_goal_diff = goal - score;
2026
2027
if (last_input < ULONG_MAX / score_goal_diff)
2028
compensation = last_input * score_goal_diff / goal;
2029
else
2030
compensation = last_input / goal * score_goal_diff;
2031
2032
if (over_achieving)
2033
return max(last_input - compensation, min_input);
2034
if (last_input < ULONG_MAX - compensation)
2035
return last_input + compensation;
2036
return ULONG_MAX;
2037
}
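/*
* Worked example (illustrative numbers, not from the original source): with
* last_input == 1000000 and score == 5000 (half of the 10000 goal),
* score_goal_diff is 5000 and compensation is 1000000 * 5000 / 10000 ==
* 500000, so the next input grows to 1500000.  With score == 15000
* (over-achieving by 50%), compensation is again 500000 and the next input
* shrinks to max(1000000 - 500000, 10000) == 500000.  Any score of 20000 or
* more short-cuts to min_input (10000).
*/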
2038
2039
#ifdef CONFIG_PSI
2040
2041
static u64 damos_get_some_mem_psi_total(void)
2042
{
2043
if (static_branch_likely(&psi_disabled))
2044
return 0;
2045
return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
2046
NSEC_PER_USEC);
2047
}
2048
2049
#else /* CONFIG_PSI */
2050
2051
static inline u64 damos_get_some_mem_psi_total(void)
2052
{
2053
return 0;
2054
}
2055
2056
#endif /* CONFIG_PSI */
2057
2058
#ifdef CONFIG_NUMA
2059
static __kernel_ulong_t damos_get_node_mem_bp(
2060
struct damos_quota_goal *goal)
2061
{
2062
struct sysinfo i;
2063
__kernel_ulong_t numerator;
2064
2065
si_meminfo_node(&i, goal->nid);
2066
if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
2067
numerator = i.totalram - i.freeram;
2068
else /* DAMOS_QUOTA_NODE_MEM_FREE_BP */
2069
numerator = i.freeram;
2070
return numerator * 10000 / i.totalram;
2071
}
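/*
* Example (illustrative numbers, not from the original source): on a node
* with i.totalram == 4194304 pages and i.freeram == 1048576 pages,
* DAMOS_QUOTA_NODE_MEM_USED_BP evaluates to
* (4194304 - 1048576) * 10000 / 4194304 == 7500 (75%), while
* DAMOS_QUOTA_NODE_MEM_FREE_BP evaluates to 2500 (25%).
*/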
2072
2073
static unsigned long damos_get_node_memcg_used_bp(
2074
struct damos_quota_goal *goal)
2075
{
2076
struct mem_cgroup *memcg;
2077
struct lruvec *lruvec;
2078
unsigned long used_pages, numerator;
2079
struct sysinfo i;
2080
2081
rcu_read_lock();
2082
memcg = mem_cgroup_from_id(goal->memcg_id);
2083
if (!memcg || !mem_cgroup_tryget(memcg)) {
2084
rcu_read_unlock();
2085
if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
2086
return 0;
2087
else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
2088
return 10000;
2089
}
2090
rcu_read_unlock();
2091
2092
mem_cgroup_flush_stats(memcg);
2093
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
2094
used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
2095
used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
2096
used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
2097
used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
2098
2099
mem_cgroup_put(memcg);
2100
2101
si_meminfo_node(&i, goal->nid);
2102
if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
2103
numerator = used_pages;
2104
else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
2105
numerator = i.totalram - used_pages;
2106
return numerator * 10000 / i.totalram;
2107
}
2108
#else
2109
static __kernel_ulong_t damos_get_node_mem_bp(
2110
struct damos_quota_goal *goal)
2111
{
2112
return 0;
2113
}
2114
2115
static unsigned long damos_get_node_memcg_used_bp(
2116
struct damos_quota_goal *goal)
2117
{
2118
return 0;
2119
}
2120
#endif
2121
2122
2123
static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
2124
{
2125
u64 now_psi_total;
2126
2127
switch (goal->metric) {
2128
case DAMOS_QUOTA_USER_INPUT:
2129
/* User should already set goal->current_value */
2130
break;
2131
case DAMOS_QUOTA_SOME_MEM_PSI_US:
2132
now_psi_total = damos_get_some_mem_psi_total();
2133
goal->current_value = now_psi_total - goal->last_psi_total;
2134
goal->last_psi_total = now_psi_total;
2135
break;
2136
case DAMOS_QUOTA_NODE_MEM_USED_BP:
2137
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
2138
goal->current_value = damos_get_node_mem_bp(goal);
2139
break;
2140
case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
2141
case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
2142
goal->current_value = damos_get_node_memcg_used_bp(goal);
2143
break;
2144
default:
2145
break;
2146
}
2147
}
2148
2149
/* Return the highest score since it makes schemes least aggressive */
2150
static unsigned long damos_quota_score(struct damos_quota *quota)
2151
{
2152
struct damos_quota_goal *goal;
2153
unsigned long highest_score = 0;
2154
2155
damos_for_each_quota_goal(goal, quota) {
2156
damos_set_quota_goal_current_value(goal);
2157
highest_score = max(highest_score,
2158
goal->current_value * 10000 /
2159
goal->target_value);
2160
}
2161
2162
return highest_score;
2163
}
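/*
* Example (illustrative only): a quota with two goals, one at 80% of its
* target (current_value * 10000 / target_value == 8000) and one at 120%
* (score 12000), gets the score 12000.  Since the feedback loop above treats
* scores over 10000 as over-achievement, picking the highest score shrinks
* the effective quota and thus keeps the schemes least aggressive.
*/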
2164
2165
/*
2166
* Called only if quota->ms or quota->sz is set, or quota->goals is not empty
2167
*/
2168
static void damos_set_effective_quota(struct damos_quota *quota)
2169
{
2170
unsigned long throughput;
2171
unsigned long esz = ULONG_MAX;
2172
2173
if (!quota->ms && list_empty(&quota->goals)) {
2174
quota->esz = quota->sz;
2175
return;
2176
}
2177
2178
if (!list_empty(&quota->goals)) {
2179
unsigned long score = damos_quota_score(quota);
2180
2181
quota->esz_bp = damon_feed_loop_next_input(
2182
max(quota->esz_bp, 10000UL),
2183
score);
2184
esz = quota->esz_bp / 10000;
2185
}
2186
2187
if (quota->ms) {
2188
if (quota->total_charged_ns)
2189
throughput = mult_frac(quota->total_charged_sz, 1000000,
2190
quota->total_charged_ns);
2191
else
2192
throughput = PAGE_SIZE * 1024;
2193
esz = min(throughput * quota->ms, esz);
2194
}
2195
2196
if (quota->sz && quota->sz < esz)
2197
esz = quota->sz;
2198
2199
quota->esz = esz;
2200
}
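/*
* Example (illustrative numbers, not from the original source): with no
* goals, quota->ms == 100 and quota->sz == 64 MiB, and an observed
* throughput of 1 MiB per millisecond (total_charged_sz scaled by
* total_charged_ns), the time-derived limit is 1 MiB * 100 == 100 MiB, so
* the smaller size quota wins and quota->esz becomes 64 MiB.
*/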
2201
2202
static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
2203
struct damos_quota *quota)
2204
{
2205
unsigned int cidx = 0, sidx = 0;
2206
struct damos *siter;
2207
2208
damon_for_each_scheme(siter, c) {
2209
if (siter == s)
2210
break;
2211
sidx++;
2212
}
2213
trace_damos_esz(cidx, sidx, quota->esz);
2214
}
2215
2216
static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
2217
{
2218
struct damos_quota *quota = &s->quota;
2219
struct damon_target *t;
2220
struct damon_region *r;
2221
unsigned long cumulated_sz, cached_esz;
2222
unsigned int score, max_score = 0;
2223
2224
if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2225
return;
2226
2227
/* First charge window */
2228
if (!quota->total_charged_sz && !quota->charged_from) {
2229
quota->charged_from = jiffies;
2230
damos_set_effective_quota(quota);
2231
}
2232
2233
/* New charge window starts */
2234
if (time_after_eq(jiffies, quota->charged_from +
2235
msecs_to_jiffies(quota->reset_interval))) {
2236
if (quota->esz && quota->charged_sz >= quota->esz)
2237
s->stat.qt_exceeds++;
2238
quota->total_charged_sz += quota->charged_sz;
2239
quota->charged_from = jiffies;
2240
quota->charged_sz = 0;
2241
if (trace_damos_esz_enabled())
2242
cached_esz = quota->esz;
2243
damos_set_effective_quota(quota);
2244
if (trace_damos_esz_enabled() && quota->esz != cached_esz)
2245
damos_trace_esz(c, s, quota);
2246
}
2247
2248
if (!c->ops.get_scheme_score)
2249
return;
2250
2251
/* Fill up the score histogram */
2252
memset(c->regions_score_histogram, 0,
2253
sizeof(*c->regions_score_histogram) *
2254
(DAMOS_MAX_SCORE + 1));
2255
damon_for_each_target(t, c) {
2256
damon_for_each_region(r, t) {
2257
if (!__damos_valid_target(r, s))
2258
continue;
2259
score = c->ops.get_scheme_score(c, t, r, s);
2260
c->regions_score_histogram[score] +=
2261
damon_sz_region(r);
2262
if (score > max_score)
2263
max_score = score;
2264
}
2265
}
2266
2267
/* Set the min score limit */
2268
for (cumulated_sz = 0, score = max_score; ; score--) {
2269
cumulated_sz += c->regions_score_histogram[score];
2270
if (cumulated_sz >= quota->esz || !score)
2271
break;
2272
}
2273
quota->min_score = score;
2274
}
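/*
* Example for the min score limit above (illustrative numbers): if 4 MiB of
* regions have score 90, 8 MiB have score 50 and 32 MiB have score 10 while
* quota->esz is 10 MiB, the walk from max_score downward accumulates 4 MiB
* at score 90 and 12 MiB at score 50, which already meets the quota, so
* quota->min_score becomes 50 and lower-scored regions are filtered out by
* the damos_valid_target() check when the scheme is applied.
*/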
2275
2276
static void kdamond_apply_schemes(struct damon_ctx *c)
2277
{
2278
struct damon_target *t;
2279
struct damon_region *r, *next_r;
2280
struct damos *s;
2281
unsigned long sample_interval = c->attrs.sample_interval ?
2282
c->attrs.sample_interval : 1;
2283
bool has_schemes_to_apply = false;
2284
2285
damon_for_each_scheme(s, c) {
2286
if (c->passed_sample_intervals < s->next_apply_sis)
2287
continue;
2288
2289
if (!s->wmarks.activated)
2290
continue;
2291
2292
has_schemes_to_apply = true;
2293
2294
damos_adjust_quota(c, s);
2295
}
2296
2297
if (!has_schemes_to_apply)
2298
return;
2299
2300
mutex_lock(&c->walk_control_lock);
2301
damon_for_each_target(t, c) {
2302
damon_for_each_region_safe(r, next_r, t)
2303
damon_do_apply_schemes(c, t, r);
2304
}
2305
2306
damon_for_each_scheme(s, c) {
2307
if (c->passed_sample_intervals < s->next_apply_sis)
2308
continue;
2309
damos_walk_complete(c, s);
2310
s->next_apply_sis = c->passed_sample_intervals +
2311
(s->apply_interval_us ? s->apply_interval_us :
2312
c->attrs.aggr_interval) / sample_interval;
2313
s->last_applied = NULL;
2314
}
2315
mutex_unlock(&c->walk_control_lock);
2316
}
2317
2318
/*
2319
* Merge two adjacent regions into one region
2320
*/
2321
static void damon_merge_two_regions(struct damon_target *t,
2322
struct damon_region *l, struct damon_region *r)
2323
{
2324
unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2325
2326
l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
2327
(sz_l + sz_r);
2328
l->nr_accesses_bp = l->nr_accesses * 10000;
2329
l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
2330
l->ar.end = r->ar.end;
2331
damon_destroy_region(r, t);
2332
}
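/*
* Example (illustrative numbers): a 4 KiB region with nr_accesses 2
* absorbing an adjacent 12 KiB region with nr_accesses 10 gets
* (2 * 4096 + 10 * 12288) / 16384 == 8 as the size-weighted access count of
* the combined 16 KiB region, and its nr_accesses_bp becomes 80000.
*/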
2333
2334
/*
2335
* Merge adjacent regions having similar access frequencies
2336
*
2337
* t target affected by this merge operation
2338
* thres '->nr_accesses' diff threshold for the merge
2339
* sz_limit size upper limit of each region
2340
*/
2341
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
2342
unsigned long sz_limit)
2343
{
2344
struct damon_region *r, *prev = NULL, *next;
2345
2346
damon_for_each_region_safe(r, next, t) {
2347
if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
2348
r->age = 0;
2349
else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
2350
r->age = 0;
2351
else
2352
r->age++;
2353
2354
if (prev && prev->ar.end == r->ar.start &&
2355
abs(prev->nr_accesses - r->nr_accesses) <= thres &&
2356
damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2357
damon_merge_two_regions(t, prev, r);
2358
else
2359
prev = r;
2360
}
2361
}
2362
2363
/*
2364
* Merge adjacent regions having similar access frequencies
2365
*
2366
* threshold '->nr_accesses' diff threshold for the merge
2367
* sz_limit size upper limit of each region
2368
*
2369
* This function merges monitoring target regions which are adjacent and whose
2370
* access frequencies are similar. This is for minimizing the monitoring
2371
* overhead under the dynamically changeable access pattern. If a merge was
2372
* unnecessarily made, later 'kdamond_split_regions()' will revert it.
2373
*
2374
* The total number of regions could be higher than the user-defined limit,
2375
* max_nr_regions, in some cases.  For example, the user can update
2376
* max_nr_regions to a number that is lower than the current number of regions
2377
* while DAMON is running.  For such a case, repeat merging until the limit is
2378
* met while increasing @threshold up to the possible maximum level.
2379
*/
2380
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
2381
unsigned long sz_limit)
2382
{
2383
struct damon_target *t;
2384
unsigned int nr_regions;
2385
unsigned int max_thres;
2386
2387
max_thres = c->attrs.aggr_interval /
2388
(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
2389
do {
2390
nr_regions = 0;
2391
damon_for_each_target(t, c) {
2392
damon_merge_regions_of(t, threshold, sz_limit);
2393
nr_regions += damon_nr_regions(t);
2394
}
2395
threshold = max(1, threshold * 2);
2396
} while (nr_regions > c->attrs.max_nr_regions &&
2397
threshold / 2 < max_thres);
2398
}
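/*
* Example of the threshold escalation (illustrative numbers): with
* aggr_interval == 100000 and sample_interval == 5000, max_thres is 20.
* Starting from threshold == 1, successive passes use thresholds 1, 2, 4, 8,
* 16 and 32.  After the pass with 32 the threshold becomes 64, and
* 64 / 2 == 32 is no longer below max_thres, so the loop stops even if the
* region count still exceeds max_nr_regions.
*/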
2399
2400
/*
2401
* Split a region in two
2402
*
2403
* r the region to be split
2404
* sz_r size of the first sub-region that will be made
2405
*/
2406
static void damon_split_region_at(struct damon_target *t,
2407
struct damon_region *r, unsigned long sz_r)
2408
{
2409
struct damon_region *new;
2410
2411
new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2412
if (!new)
2413
return;
2414
2415
r->ar.end = new->ar.start;
2416
2417
new->age = r->age;
2418
new->last_nr_accesses = r->last_nr_accesses;
2419
new->nr_accesses_bp = r->nr_accesses_bp;
2420
new->nr_accesses = r->nr_accesses;
2421
2422
damon_insert_region(new, r, damon_next_region(r), t);
2423
}
2424
2425
/* Split every region in the given target into 'nr_subs' regions */
2426
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
2427
unsigned long min_sz_region)
2428
{
2429
struct damon_region *r, *next;
2430
unsigned long sz_region, sz_sub = 0;
2431
int i;
2432
2433
damon_for_each_region_safe(r, next, t) {
2434
sz_region = damon_sz_region(r);
2435
2436
for (i = 0; i < nr_subs - 1 &&
2437
sz_region > 2 * min_sz_region; i++) {
2438
/*
2439
* Randomly select size of left sub-region to be at
2440
* least 10% and at most 90% of the original region
2441
*/
2442
sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
2443
sz_region / 10, min_sz_region);
2444
/* Do not allow blank region */
2445
if (sz_sub == 0 || sz_sub >= sz_region)
2446
continue;
2447
2448
damon_split_region_at(t, r, sz_sub);
2449
sz_region = sz_sub;
2450
}
2451
}
2452
}
2453
2454
/*
2455
* Split every target region into randomly-sized small regions
2456
*
2457
* This function splits every target region into random-sized small regions if
2458
* current total number of the regions is equal to or smaller than half of the
2459
* user-specified maximum number of regions. This is for maximizing the
2460
* monitoring accuracy under the dynamically changeable access patterns. If a
2461
* split was unnecessarily made, later 'kdamond_merge_regions()' will revert
2462
* it.
2463
*/
2464
static void kdamond_split_regions(struct damon_ctx *ctx)
2465
{
2466
struct damon_target *t;
2467
unsigned int nr_regions = 0;
2468
static unsigned int last_nr_regions;
2469
int nr_subregions = 2;
2470
2471
damon_for_each_target(t, ctx)
2472
nr_regions += damon_nr_regions(t);
2473
2474
if (nr_regions > ctx->attrs.max_nr_regions / 2)
2475
return;
2476
2477
/* Maybe the middle of the region has different access frequency */
2478
if (last_nr_regions == nr_regions &&
2479
nr_regions < ctx->attrs.max_nr_regions / 3)
2480
nr_subregions = 3;
2481
2482
damon_for_each_target(t, ctx)
2483
damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);
2484
2485
last_nr_regions = nr_regions;
2486
}
2487
2488
/*
2489
* Check whether current monitoring should be stopped
2490
*
2491
* The monitoring is stopped when either the user requested to stop, or all
2492
* monitoring targets are invalid.
2493
*
2494
* Returns true if the current monitoring needs to be stopped.
2495
*/
2496
static bool kdamond_need_stop(struct damon_ctx *ctx)
2497
{
2498
struct damon_target *t;
2499
2500
if (kthread_should_stop())
2501
return true;
2502
2503
if (!ctx->ops.target_valid)
2504
return false;
2505
2506
damon_for_each_target(t, ctx) {
2507
if (ctx->ops.target_valid(t))
2508
return false;
2509
}
2510
2511
return true;
2512
}
2513
2514
static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2515
unsigned long *metric_value)
2516
{
2517
switch (metric) {
2518
case DAMOS_WMARK_FREE_MEM_RATE:
2519
*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2520
totalram_pages();
2521
return 0;
2522
default:
2523
break;
2524
}
2525
return -EINVAL;
2526
}
2527
2528
/*
2529
* Returns zero if the scheme is active. Else, returns time to wait for next
2530
* watermark check in micro-seconds.
2531
*/
2532
static unsigned long damos_wmark_wait_us(struct damos *scheme)
2533
{
2534
unsigned long metric;
2535
2536
if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2537
return 0;
2538
2539
/* higher than high watermark or lower than low watermark */
2540
if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2541
if (scheme->wmarks.activated)
2542
pr_debug("deactivate a scheme (%d) for %s wmark\n",
2543
scheme->action,
2544
str_high_low(metric > scheme->wmarks.high));
2545
scheme->wmarks.activated = false;
2546
return scheme->wmarks.interval;
2547
}
2548
2549
/* inactive and higher than middle watermark */
2550
if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2551
!scheme->wmarks.activated)
2552
return scheme->wmarks.interval;
2553
2554
if (!scheme->wmarks.activated)
2555
pr_debug("activate a scheme (%d)\n", scheme->action);
2556
scheme->wmarks.activated = true;
2557
return 0;
2558
}
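/*
* Example (illustrative thresholds): with wmarks.metric ==
* DAMOS_WMARK_FREE_MEM_RATE, wmarks.high == 500, wmarks.mid == 300 and
* wmarks.low == 200 (per-thousand of total RAM), a measured value of 600 or
* 150 deactivates the scheme and returns wmarks.interval; 400 keeps a
* not-yet-activated scheme inactive (also returning wmarks.interval); and
* 250 activates the scheme and returns zero.
*/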
2559
2560
static void kdamond_usleep(unsigned long usecs)
2561
{
2562
if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2563
schedule_timeout_idle(usecs_to_jiffies(usecs));
2564
else
2565
usleep_range_idle(usecs, usecs + 1);
2566
}
2567
2568
/*
2569
* kdamond_call() - handle damon_call_control objects.
2570
* @ctx: The &struct damon_ctx of the kdamond.
2571
* @cancel: Whether to cancel the invocation of the function.
2572
*
2573
* If there are &struct damon_call_control requests that are registered via
2574
* &damon_call() on @ctx, do or cancel the invocation of the function depending
2575
* on @cancel. @cancel is set when the kdamond is already out of the main loop
2576
* and therefore will be terminated.
2577
*/
2578
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2579
{
2580
struct damon_call_control *control;
2581
LIST_HEAD(repeat_controls);
2582
int ret = 0;
2583
2584
while (true) {
2585
mutex_lock(&ctx->call_controls_lock);
2586
control = list_first_entry_or_null(&ctx->call_controls,
2587
struct damon_call_control, list);
2588
mutex_unlock(&ctx->call_controls_lock);
2589
if (!control)
2590
break;
2591
if (cancel) {
2592
control->canceled = true;
2593
} else {
2594
ret = control->fn(control->data);
2595
control->return_code = ret;
2596
}
2597
mutex_lock(&ctx->call_controls_lock);
2598
list_del(&control->list);
2599
mutex_unlock(&ctx->call_controls_lock);
2600
if (!control->repeat) {
2601
complete(&control->completion);
2602
} else if (control->canceled && control->dealloc_on_cancel) {
2603
kfree(control);
2604
continue;
2605
} else {
2606
list_add(&control->list, &repeat_controls);
2607
}
2608
}
2609
control = list_first_entry_or_null(&repeat_controls,
2610
struct damon_call_control, list);
2611
if (!control || cancel)
2612
return;
2613
mutex_lock(&ctx->call_controls_lock);
2614
list_add_tail(&control->list, &ctx->call_controls);
2615
mutex_unlock(&ctx->call_controls_lock);
2616
}
2617
2618
/* Returns negative error code if it's not activated but should return */
2619
static int kdamond_wait_activation(struct damon_ctx *ctx)
2620
{
2621
struct damos *s;
2622
unsigned long wait_time;
2623
unsigned long min_wait_time = 0;
2624
bool init_wait_time = false;
2625
2626
while (!kdamond_need_stop(ctx)) {
2627
damon_for_each_scheme(s, ctx) {
2628
wait_time = damos_wmark_wait_us(s);
2629
if (!init_wait_time || wait_time < min_wait_time) {
2630
init_wait_time = true;
2631
min_wait_time = wait_time;
2632
}
2633
}
2634
if (!min_wait_time)
2635
return 0;
2636
2637
kdamond_usleep(min_wait_time);
2638
2639
kdamond_call(ctx, false);
2640
damos_walk_cancel(ctx);
2641
}
2642
return -EBUSY;
2643
}
2644
2645
static void kdamond_init_ctx(struct damon_ctx *ctx)
2646
{
2647
unsigned long sample_interval = ctx->attrs.sample_interval ?
2648
ctx->attrs.sample_interval : 1;
2649
unsigned long apply_interval;
2650
struct damos *scheme;
2651
2652
ctx->passed_sample_intervals = 0;
2653
ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2654
ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2655
sample_interval;
2656
ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
2657
ctx->attrs.intervals_goal.aggrs;
2658
2659
damon_for_each_scheme(scheme, ctx) {
2660
apply_interval = scheme->apply_interval_us ?
2661
scheme->apply_interval_us : ctx->attrs.aggr_interval;
2662
scheme->next_apply_sis = apply_interval / sample_interval;
2663
damos_set_filters_default_reject(scheme);
2664
}
2665
}
2666
2667
/*
2668
* The monitoring daemon that runs as a kernel thread
2669
*/
2670
static int kdamond_fn(void *data)
2671
{
2672
struct damon_ctx *ctx = data;
2673
struct damon_target *t;
2674
struct damon_region *r, *next;
2675
unsigned int max_nr_accesses = 0;
2676
unsigned long sz_limit = 0;
2677
2678
pr_debug("kdamond (%d) starts\n", current->pid);
2679
2680
complete(&ctx->kdamond_started);
2681
kdamond_init_ctx(ctx);
2682
2683
if (ctx->ops.init)
2684
ctx->ops.init(ctx);
2685
ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2686
sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2687
if (!ctx->regions_score_histogram)
2688
goto done;
2689
2690
sz_limit = damon_region_sz_limit(ctx);
2691
2692
while (!kdamond_need_stop(ctx)) {
2693
/*
2694
* ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2695
* be changed from kdamond_call(). Read the values here, and
2696
* use those for this iteration.  That is, new values updated by
2697
* damon_set_attrs() are respected from the next iteration.
2698
*/
2699
unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2700
unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2701
unsigned long sample_interval = ctx->attrs.sample_interval;
2702
2703
if (kdamond_wait_activation(ctx))
2704
break;
2705
2706
if (ctx->ops.prepare_access_checks)
2707
ctx->ops.prepare_access_checks(ctx);
2708
2709
kdamond_usleep(sample_interval);
2710
ctx->passed_sample_intervals++;
2711
2712
if (ctx->ops.check_accesses)
2713
max_nr_accesses = ctx->ops.check_accesses(ctx);
2714
2715
if (ctx->passed_sample_intervals >= next_aggregation_sis)
2716
kdamond_merge_regions(ctx,
2717
max_nr_accesses / 10,
2718
sz_limit);
2719
2720
/*
2721
* do kdamond_call() and kdamond_apply_schemes() after
2722
* kdamond_merge_regions() if possible, to reduce overhead
2723
*/
2724
kdamond_call(ctx, false);
2725
if (!list_empty(&ctx->schemes))
2726
kdamond_apply_schemes(ctx);
2727
else
2728
damos_walk_cancel(ctx);
2729
2730
sample_interval = ctx->attrs.sample_interval ?
2731
ctx->attrs.sample_interval : 1;
2732
if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2733
if (ctx->attrs.intervals_goal.aggrs &&
2734
ctx->passed_sample_intervals >=
2735
ctx->next_intervals_tune_sis) {
2736
/*
2737
* ctx->next_aggregation_sis might be updated
2738
* from kdamond_call(). In the case,
2739
* damon_set_attrs() which will be called from
2740
* kdamond_tune_intervals() may wrongly think
2741
* this is in the middle of the current
2742
* aggregation, and make aggregation
2743
* information reset for all regions. Then,
2744
* following kdamond_reset_aggregated() call
2745
* will make the region information invalid,
2746
* particularly for ->nr_accesses_bp.
2747
*
2748
* Reset ->next_aggregation_sis to avoid that.
2749
* It will anyway be correctly updated after this
2750
* 'if' clause.
2751
*/
2752
ctx->next_aggregation_sis =
2753
next_aggregation_sis;
2754
ctx->next_intervals_tune_sis +=
2755
ctx->attrs.aggr_samples *
2756
ctx->attrs.intervals_goal.aggrs;
2757
kdamond_tune_intervals(ctx);
2758
sample_interval = ctx->attrs.sample_interval ?
2759
ctx->attrs.sample_interval : 1;
2760
2761
}
2762
ctx->next_aggregation_sis = next_aggregation_sis +
2763
ctx->attrs.aggr_interval / sample_interval;
2764
2765
kdamond_reset_aggregated(ctx);
2766
kdamond_split_regions(ctx);
2767
}
2768
2769
if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2770
ctx->next_ops_update_sis = next_ops_update_sis +
2771
ctx->attrs.ops_update_interval /
2772
sample_interval;
2773
if (ctx->ops.update)
2774
ctx->ops.update(ctx);
2775
sz_limit = damon_region_sz_limit(ctx);
2776
}
2777
}
2778
done:
2779
damon_for_each_target(t, ctx) {
2780
damon_for_each_region_safe(r, next, t)
2781
damon_destroy_region(r, t);
2782
}
2783
2784
if (ctx->ops.cleanup)
2785
ctx->ops.cleanup(ctx);
2786
kfree(ctx->regions_score_histogram);
2787
kdamond_call(ctx, true);
2788
2789
pr_debug("kdamond (%d) finishes\n", current->pid);
2790
mutex_lock(&ctx->kdamond_lock);
2791
ctx->kdamond = NULL;
2792
mutex_unlock(&ctx->kdamond_lock);
2793
2794
damos_walk_cancel(ctx);
2795
2796
mutex_lock(&damon_lock);
2797
nr_running_ctxs--;
2798
if (!nr_running_ctxs && running_exclusive_ctxs)
2799
running_exclusive_ctxs = false;
2800
mutex_unlock(&damon_lock);
2801
2802
damon_destroy_targets(ctx);
2803
return 0;
2804
}
2805
2806
/*
2807
* struct damon_system_ram_region - System RAM resource address region of
2808
* [@start, @end).
2809
* @start: Start address of the region (inclusive).
2810
* @end: End address of the region (exclusive).
2811
*/
2812
struct damon_system_ram_region {
2813
unsigned long start;
2814
unsigned long end;
2815
};
2816
2817
static int walk_system_ram(struct resource *res, void *arg)
2818
{
2819
struct damon_system_ram_region *a = arg;
2820
2821
if (a->end - a->start < resource_size(res)) {
2822
a->start = res->start;
2823
a->end = res->end;
2824
}
2825
return 0;
2826
}
2827
2828
/*
2829
* Find biggest 'System RAM' resource and store its start and end address in
2830
* @start and @end, respectively. If no System RAM is found, returns false.
2831
*/
2832
static bool damon_find_biggest_system_ram(unsigned long *start,
2833
unsigned long *end)
2834
2835
{
2836
struct damon_system_ram_region arg = {};
2837
2838
walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2839
if (arg.end <= arg.start)
2840
return false;
2841
2842
*start = arg.start;
2843
*end = arg.end;
2844
return true;
2845
}
2846
2847
/**
2848
* damon_set_region_biggest_system_ram_default() - Set the region of the given
2849
* monitoring target as requested, or biggest 'System RAM'.
2850
* @t: The monitoring target to set the region.
2851
* @start: The pointer to the start address of the region.
2852
* @end: The pointer to the end address of the region.
2853
* @min_sz_region: Minimum region size.
2854
*
2855
* This function sets the region of @t as requested by @start and @end. If the
2856
* values of @start and @end are zero, however, this function finds the biggest
2857
* 'System RAM' resource and sets the region to cover the resource. In the
2858
* latter case, this function saves the start and end addresses of the resource
2859
* in @start and @end, respectively.
2860
*
2861
* Return: 0 on success, negative error code otherwise.
2862
*/
2863
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2864
unsigned long *start, unsigned long *end,
2865
unsigned long min_sz_region)
2866
{
2867
struct damon_addr_range addr_range;
2868
2869
if (*start > *end)
2870
return -EINVAL;
2871
2872
if (!*start && !*end &&
2873
!damon_find_biggest_system_ram(start, end))
2874
return -EINVAL;
2875
2876
addr_range.start = *start;
2877
addr_range.end = *end;
2878
return damon_set_regions(t, &addr_range, 1, min_sz_region);
2879
}
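/*
* Usage sketch (illustrative only; the caller, the target 't', and the use
* of DAMON_MIN_REGION as @min_sz_region are assumptions, not from this
* file):
*
*	unsigned long start = 0, end = 0;
*	int err = damon_set_region_biggest_system_ram_default(t, &start,
*			&end, DAMON_MIN_REGION);
*
* Passing zero for both addresses lets the function pick the biggest
* 'System RAM' resource and report its range back through @start and @end.
*/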
2880
2881
/*
2882
* damon_moving_sum() - Calculate an inferred moving sum value.
2883
* @mvsum: Inferred sum of the last @len_window values.
2884
* @nomvsum: Non-moving sum of the last discrete @len_window window values.
2885
* @len_window: The number of last values to take care of.
2886
* @new_value: New value that will be added to the pseudo moving sum.
2887
*
2888
* Moving sum (moving average * window size) is good for handling noise, but
2889
* the cost of keeping past values can be high for arbitrary window size. This
2890
* function implements a lightweight pseudo moving sum function that doesn't
2891
* keep the past window values.
2892
*
2893
* It simply assumes there was no noise in the past, and gets the no-noise
2894
* assumed past value to drop from @nomvsum and @len_window. @nomvsum is a
2895
* non-moving sum of the last window. For example, if @len_window is 10 and we
2896
* have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
2897
* values. Hence, this function simply drops @nomvsum / @len_window from
2898
* the given @mvsum and adds @new_value.
2899
*
2900
* For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
2901
* the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
2902
* calculating next moving sum with a new value, we should drop 0 from 50 and
2903
* add the new value. However, this function assumes it got value 5 for each
2904
* of the last ten times. Based on the assumption, when the next value is
2905
* measured, it drops the assumed past value, 5, from the current sum, and adds
2906
* the new value to get the updated pseudo-moving sum.
2907
*
2908
* This means the value could have errors, but the errors disappear
2909
* at every @len_window-aligned call.  For example, if @len_window is 10, the
2910
* pseudo moving sum with 11th value to 19th value would have an error. But
2911
* the sum with 20th value will not have the error.
2912
*
2913
* Return: Pseudo-moving sum after getting the @new_value.
2914
*/
2915
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2916
unsigned int len_window, unsigned int new_value)
2917
{
2918
return mvsum - nomvsum / len_window + new_value;
2919
}
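/*
* Worked example (illustrative only): continuing the comment above with
* len_window == 10 and nomvsum == 50, the assumed per-interval past value is
* 50 / 10 == 5.  If the current mvsum is 60 and the new value is 20, the
* updated pseudo-moving sum is 60 - 5 + 20 == 75.
*/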
2920
2921
/**
2922
* damon_update_region_access_rate() - Update the access rate of a region.
2923
* @r: The DAMON region to update for its access check result.
2924
* @accessed: Whether the region has been accessed during the last sampling interval.
2925
* @attrs: The damon_attrs of the DAMON context.
2926
*
2927
* Update the access rate of a region with the region's last sampling interval
2928
* access check result.
2929
*
2930
* Usually this will be called by &damon_operations->check_accesses callback.
2931
*/
2932
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2933
struct damon_attrs *attrs)
2934
{
2935
unsigned int len_window = 1;
2936
2937
/*
2938
* sample_interval can be zero, but cannot be larger than
2939
* aggr_interval, owing to validation of damon_set_attrs().
2940
*/
2941
if (attrs->sample_interval)
2942
len_window = damon_max_nr_accesses(attrs);
2943
r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2944
r->last_nr_accesses * 10000, len_window,
2945
accessed ? 10000 : 0);
2946
2947
if (accessed)
2948
r->nr_accesses++;
2949
}
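/*
* Example (illustrative numbers, assuming damon_max_nr_accesses() evaluates
* to aggr_interval / sample_interval): with sample_interval == 5000 and
* aggr_interval == 100000, the moving-sum window is 20 samples.  For a
* region with last_nr_accesses == 10 (nomvsum == 100000) and nr_accesses_bp
* == 100000, an accessed sample updates nr_accesses_bp to
* 100000 - 100000 / 20 + 10000 == 105000, i.e., 10.5 accesses in
* basis-point form.
*/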
2950
2951
/**
2952
* damon_initialized() - Return if DAMON is ready to be used.
2953
*
2954
* Return: true if DAMON is ready to be used, false otherwise.
2955
*/
2956
bool damon_initialized(void)
2957
{
2958
return damon_region_cache != NULL;
2959
}
2960
2961
static int __init damon_init(void)
2962
{
2963
damon_region_cache = KMEM_CACHE(damon_region, 0);
2964
if (unlikely(!damon_region_cache)) {
2965
pr_err("creating damon_region_cache fails\n");
2966
return -ENOMEM;
2967
}
2968
2969
return 0;
2970
}
2971
2972
subsys_initcall(damon_init);
2973
2974
#include "tests/core-kunit.h"
2975
2976