// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON sysfs Interface
 *
 * Copyright (c) 2022 SeongJae Park <[email protected]>
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "sysfs-common.h"
/*
 * init region directory
 */

/* sysfs representation of one user-specified initial monitoring region */
struct damon_sysfs_region {
	struct kobject kobj;		/* backing sysfs directory object */
	struct damon_addr_range ar;	/* the region's address range */
};
static struct damon_sysfs_region *damon_sysfs_region_alloc(void)
24
{
25
return kzalloc(sizeof(struct damon_sysfs_region), GFP_KERNEL);
26
}
27
28
static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
29
char *buf)
30
{
31
struct damon_sysfs_region *region = container_of(kobj,
32
struct damon_sysfs_region, kobj);
33
34
return sysfs_emit(buf, "%lu\n", region->ar.start);
35
}
36
37
/* Parse user input and set the region's start address. */
static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region;
	int err;

	region = container_of(kobj, struct damon_sysfs_region, kobj);
	err = kstrtoul(buf, 0, &region->ar.start);
	if (err)
		return err;
	return count;
}
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
48
char *buf)
49
{
50
struct damon_sysfs_region *region = container_of(kobj,
51
struct damon_sysfs_region, kobj);
52
53
return sysfs_emit(buf, "%lu\n", region->ar.end);
54
}
55
56
/* Parse user input and set the region's end address. */
static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region;
	int err;

	region = container_of(kobj, struct damon_sysfs_region, kobj);
	err = kstrtoul(buf, 0, &region->ar.end);
	if (err)
		return err;
	return count;
}
static void damon_sysfs_region_release(struct kobject *kobj)
67
{
68
kfree(container_of(kobj, struct damon_sysfs_region, kobj));
69
}
70
71
/* 'start' file of a region directory */
static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

/* 'end' file of a region directory */
static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

/* files under each region directory */
static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

/* kobject type for region directories */
static const struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
/*
 * init_regions directory
 */

/* sysfs directory holding numbered region subdirectories */
struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;	/* child region objects */
	int nr;						/* number of entries in regions_arr */
};
static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
101
{
102
return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
103
}
104
105
/*
 * Remove all region directories under 'regions'.
 *
 * Each kobject_put() drops the reference taken at creation; the region is
 * freed via damon_sysfs_region_release() when the count reaches zero.
 */
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
/*
 * Create 'nr_regions' numbered region directories under 'regions'.
 *
 * Existing directories are removed first.  On failure, everything created
 * so far is torn down and an error code is returned.  A region whose
 * kobject_init_and_add() failed is put individually because it was never
 * stored in regions_arr and hence is invisible to
 * damon_sysfs_regions_rm_dirs().
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc();
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
static ssize_t nr_regions_show(struct kobject *kobj,
156
struct kobj_attribute *attr, char *buf)
157
{
158
struct damon_sysfs_regions *regions = container_of(kobj,
159
struct damon_sysfs_regions, kobj);
160
161
return sysfs_emit(buf, "%d\n", regions->nr);
162
}
163
164
/* Re-create the region directories per the user-requested count. */
static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions;
	int nr;
	int err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	regions = container_of(kobj, struct damon_sysfs_regions, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);

	return err ? err : count;
}
static void damon_sysfs_regions_release(struct kobject *kobj)
188
{
189
kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
190
}
191
192
/* 'nr_regions' file of the regions directory */
static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

/* files under the regions directory */
static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

/* kobject type for the regions directory */
static const struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
/*
 * target directory
 */

/* sysfs representation of one monitoring target */
struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;	/* 'regions' subdirectory */
	int pid;	/* user-written pid number for the target */
};
static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
218
{
219
return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
220
}
221
222
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
223
{
224
struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
225
int err;
226
227
if (!regions)
228
return -ENOMEM;
229
230
err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
231
&target->kobj, "regions");
232
if (err)
233
kobject_put(&regions->kobj);
234
else
235
target->regions = regions;
236
return err;
237
}
238
239
/*
 * Remove the subdirectories of a target directory.
 *
 * Child region directories are removed before putting the 'regions'
 * kobject itself.
 */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
static ssize_t pid_target_show(struct kobject *kobj,
246
struct kobj_attribute *attr, char *buf)
247
{
248
struct damon_sysfs_target *target = container_of(kobj,
249
struct damon_sysfs_target, kobj);
250
251
return sysfs_emit(buf, "%d\n", target->pid);
252
}
253
254
/*
 * Parse user input and set the target's pid number.
 *
 * Propagate the kstrtoint() error code (-EINVAL or -ERANGE) instead of
 * collapsing every failure to -EINVAL, for consistency with the other
 * store callbacks in this file (e.g. start_store(), end_store()).
 */
static ssize_t pid_target_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);
	int err = kstrtoint(buf, 0, &target->pid);

	if (err)
		return err;
	return count;
}
static void damon_sysfs_target_release(struct kobject *kobj)
267
{
268
kfree(container_of(kobj, struct damon_sysfs_target, kobj));
269
}
270
271
/* 'pid_target' file of a target directory */
static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

/* files under each target directory */
static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

/* kobject type for target directories */
static const struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
/*
 * targets directory
 */

/* sysfs directory holding numbered target subdirectories */
struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;	/* child target objects */
	int nr;						/* number of entries in targets_arr */
};
static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
297
{
298
return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
299
}
300
301
/*
 * Remove all target directories under 'targets'.
 *
 * Each target's subdirectories are removed before the target's own
 * kobject is put.
 */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
/*
 * Create 'nr_targets' numbered target directories under 'targets'.
 *
 * Existing directories are removed first.  On failure, already created
 * directories are cleaned up via damon_sysfs_targets_rm_dirs(); the
 * failing target itself is put separately at the 'out' label because it
 * was never stored in targets_arr (its 'regions' subdirectory, if the
 * failure came from damon_sysfs_target_add_dirs(), was already put on
 * that function's own error path).
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
static ssize_t nr_targets_show(struct kobject *kobj,
360
struct kobj_attribute *attr, char *buf)
361
{
362
struct damon_sysfs_targets *targets = container_of(kobj,
363
struct damon_sysfs_targets, kobj);
364
365
return sysfs_emit(buf, "%d\n", targets->nr);
366
}
367
368
/* Re-create the target directories per the user-requested count. */
static ssize_t nr_targets_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_targets *targets;
	int nr;
	int err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	targets = container_of(kobj, struct damon_sysfs_targets, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_targets_add_dirs(targets, nr);
	mutex_unlock(&damon_sysfs_lock);

	return err ? err : count;
}
static void damon_sysfs_targets_release(struct kobject *kobj)
392
{
393
kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
394
}
395
396
/* 'nr_targets' file of the targets directory */
static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

/* files under the targets directory */
static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

/* kobject type for the targets directory */
static const struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
/*
 * intervals goal directory
 */

/* sysfs representation of monitoring intervals auto-tuning goal inputs */
struct damon_sysfs_intervals_goal {
	struct kobject kobj;
	unsigned long access_bp;	/* target access ratio (bp) */
	unsigned long aggrs;		/* aggregation windows to consider */
	unsigned long min_sample_us;	/* lower bound for sampling interval */
	unsigned long max_sample_us;	/* upper bound for sampling interval */
};
static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc(
424
unsigned long access_bp, unsigned long aggrs,
425
unsigned long min_sample_us, unsigned long max_sample_us)
426
{
427
struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal),
428
GFP_KERNEL);
429
430
if (!goal)
431
return NULL;
432
433
goal->kobj = (struct kobject){};
434
goal->access_bp = access_bp;
435
goal->aggrs = aggrs;
436
goal->min_sample_us = min_sample_us;
437
goal->max_sample_us = max_sample_us;
438
return goal;
439
}
440
441
static ssize_t access_bp_show(struct kobject *kobj,
442
struct kobj_attribute *attr, char *buf)
443
{
444
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
445
struct damon_sysfs_intervals_goal, kobj);
446
447
return sysfs_emit(buf, "%lu\n", goal->access_bp);
448
}
449
450
/* Parse user input and set the goal's access ratio. */
static ssize_t access_bp_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	goal->access_bp = val;
	return count;
}
static ssize_t aggrs_show(struct kobject *kobj,
466
struct kobj_attribute *attr, char *buf)
467
{
468
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
469
struct damon_sysfs_intervals_goal, kobj);
470
471
return sysfs_emit(buf, "%lu\n", goal->aggrs);
472
}
473
474
/* Parse user input and set the goal's number of aggregation windows. */
static ssize_t aggrs_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	goal->aggrs = val;
	return count;
}
static ssize_t min_sample_us_show(struct kobject *kobj,
490
struct kobj_attribute *attr, char *buf)
491
{
492
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
493
struct damon_sysfs_intervals_goal, kobj);
494
495
return sysfs_emit(buf, "%lu\n", goal->min_sample_us);
496
}
497
498
/* Parse user input and set the goal's minimum sampling interval. */
static ssize_t min_sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	goal->min_sample_us = val;
	return count;
}
static ssize_t max_sample_us_show(struct kobject *kobj,
514
struct kobj_attribute *attr, char *buf)
515
{
516
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
517
struct damon_sysfs_intervals_goal, kobj);
518
519
return sysfs_emit(buf, "%lu\n", goal->max_sample_us);
520
}
521
522
/* Parse user input and set the goal's maximum sampling interval. */
static ssize_t max_sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	goal->max_sample_us = val;
	return count;
}
static void damon_sysfs_intervals_goal_release(struct kobject *kobj)
538
{
539
kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj));
540
}
541
542
/* 'access_bp' file of the intervals goal directory */
static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr =
		__ATTR_RW_MODE(access_bp, 0600);

/* 'aggrs' file of the intervals goal directory */
static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr =
		__ATTR_RW_MODE(aggrs, 0600);

/* 'min_sample_us' file of the intervals goal directory */
static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr =
		__ATTR_RW_MODE(min_sample_us, 0600);

/* 'max_sample_us' file of the intervals goal directory */
static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr =
		__ATTR_RW_MODE(max_sample_us, 0600);

/* files under the intervals goal directory */
static struct attribute *damon_sysfs_intervals_goal_attrs[] = {
	&damon_sysfs_intervals_goal_access_bp_attr.attr,
	&damon_sysfs_intervals_goal_aggrs_attr.attr,
	&damon_sysfs_intervals_goal_min_sample_us_attr.attr,
	&damon_sysfs_intervals_goal_max_sample_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal);

/* kobject type for the intervals goal directory */
static const struct kobj_type damon_sysfs_intervals_goal_ktype = {
	.release = damon_sysfs_intervals_goal_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_goal_groups,
};
/*
 * intervals directory
 */

/* sysfs representation of the monitoring intervals */
struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;	/* sampling interval (us) */
	unsigned long aggr_us;		/* aggregation interval (us) */
	unsigned long update_us;	/* ops update interval (us) */
	struct damon_sysfs_intervals_goal *intervals_goal;	/* tuning goal dir */
};
static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
582
unsigned long sample_us, unsigned long aggr_us,
583
unsigned long update_us)
584
{
585
struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
586
GFP_KERNEL);
587
588
if (!intervals)
589
return NULL;
590
591
intervals->kobj = (struct kobject){};
592
intervals->sample_us = sample_us;
593
intervals->aggr_us = aggr_us;
594
intervals->update_us = update_us;
595
return intervals;
596
}
597
598
/*
 * Create the 'intervals_goal' subdirectory of an intervals directory.
 *
 * On failure the goal kobject is put (freeing it) and
 * intervals->intervals_goal is reset to NULL.
 */
static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals)
{
	struct damon_sysfs_intervals_goal *goal;
	int err;

	goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0);
	if (!goal)
		return -ENOMEM;

	err = kobject_init_and_add(&goal->kobj,
			&damon_sysfs_intervals_goal_ktype, &intervals->kobj,
			"intervals_goal");
	if (err) {
		kobject_put(&goal->kobj);
		intervals->intervals_goal = NULL;
		return err;
	}
	intervals->intervals_goal = goal;
	return 0;
}
/* Remove the 'intervals_goal' subdirectory of an intervals directory. */
static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals)
{
	kobject_put(&intervals->intervals_goal->kobj);
}
static ssize_t sample_us_show(struct kobject *kobj,
625
struct kobj_attribute *attr, char *buf)
626
{
627
struct damon_sysfs_intervals *intervals = container_of(kobj,
628
struct damon_sysfs_intervals, kobj);
629
630
return sysfs_emit(buf, "%lu\n", intervals->sample_us);
631
}
632
633
/* Parse user input and set the sampling interval. */
static ssize_t sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	intervals = container_of(kobj, struct damon_sysfs_intervals, kobj);
	intervals->sample_us = val;
	return count;
}
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
649
char *buf)
650
{
651
struct damon_sysfs_intervals *intervals = container_of(kobj,
652
struct damon_sysfs_intervals, kobj);
653
654
return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
655
}
656
657
/* Parse user input and set the aggregation interval. */
static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	intervals = container_of(kobj, struct damon_sysfs_intervals, kobj);
	intervals->aggr_us = val;
	return count;
}
static ssize_t update_us_show(struct kobject *kobj,
673
struct kobj_attribute *attr, char *buf)
674
{
675
struct damon_sysfs_intervals *intervals = container_of(kobj,
676
struct damon_sysfs_intervals, kobj);
677
678
return sysfs_emit(buf, "%lu\n", intervals->update_us);
679
}
680
681
/* Parse user input and set the ops update interval. */
static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;

	intervals = container_of(kobj, struct damon_sysfs_intervals, kobj);
	intervals->update_us = val;
	return count;
}
static void damon_sysfs_intervals_release(struct kobject *kobj)
697
{
698
kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
699
}
700
701
/* 'sample_us' file of the intervals directory */
static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

/* 'aggr_us' file of the intervals directory */
static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

/* 'update_us' file of the intervals directory */
static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

/* files under the intervals directory */
static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

/* kobject type for the intervals directory */
static const struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
/*
 * monitoring_attrs directory
 */

/* sysfs directory for monitoring attributes */
struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;	/* 'intervals' subdir */
	struct damon_sysfs_ul_range *nr_regions_range;	/* 'nr_regions' subdir */
};
static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
735
{
736
struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
737
738
if (!attrs)
739
return NULL;
740
attrs->kobj = (struct kobject){};
741
return attrs;
742
}
743
744
/*
 * Create the 'intervals' and 'nr_regions' subdirectories of a
 * 'monitoring_attrs' directory.
 *
 * On failure, kobjects created so far are put in reverse order and the
 * corresponding pointers reset to NULL.  The interval/range default
 * values here (5000/100000/60000000 us and 10..1000 regions) presumably
 * mirror DAMON's in-kernel defaults — TODO confirm against damon core.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	err = damon_sysfs_intervals_add_dirs(intervals);
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
/*
 * Remove the subdirectories of a 'monitoring_attrs' directory.
 *
 * The intervals directory's own subdirectories are removed before its
 * kobject is put.
 */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	damon_sysfs_intervals_rm_dirs(attrs->intervals);
	kobject_put(&attrs->intervals->kobj);
}
static void damon_sysfs_attrs_release(struct kobject *kobj)
795
{
796
kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
797
}
798
799
/* no files directly under monitoring_attrs; only subdirectories */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

/* kobject type for the monitoring_attrs directory */
static const struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
/*
 * context directory
 */

/* mapping between a DAMON operations set id and its sysfs name */
struct damon_sysfs_ops_name {
	enum damon_ops_id ops_id;
	char *name;
};

/* operations set names as exposed via the 'operations' file */
static const struct damon_sysfs_ops_name damon_sysfs_ops_names[] = {
	{
		.ops_id = DAMON_OPS_VADDR,
		.name = "vaddr",
	},
	{
		.ops_id = DAMON_OPS_FVADDR,
		.name = "fvaddr",
	},
	{
		.ops_id = DAMON_OPS_PADDR,
		.name = "paddr",
	},
};
/* sysfs representation of one DAMON context */
struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;		/* selected operations set */
	struct damon_sysfs_attrs *attrs;	/* 'monitoring_attrs' subdir */
	struct damon_sysfs_targets *targets;	/* 'targets' subdir */
	struct damon_sysfs_schemes *schemes;	/* 'schemes' subdir */
};
static struct damon_sysfs_context *damon_sysfs_context_alloc(
843
enum damon_ops_id ops_id)
844
{
845
struct damon_sysfs_context *context = kmalloc(sizeof(*context),
846
GFP_KERNEL);
847
848
if (!context)
849
return NULL;
850
context->kobj = (struct kobject){};
851
context->ops_id = ops_id;
852
return context;
853
}
854
855
/*
 * Create the 'monitoring_attrs' subdirectory of a context directory.
 *
 * On any failure after kobject_init_and_add(), putting the kobject both
 * removes the sysfs entry and frees 'attrs'.
 */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
878
{
879
struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
880
int err;
881
882
if (!targets)
883
return -ENOMEM;
884
err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
885
&context->kobj, "targets");
886
if (err) {
887
kobject_put(&targets->kobj);
888
return err;
889
}
890
context->targets = targets;
891
return 0;
892
}
893
894
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
895
{
896
struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
897
int err;
898
899
if (!schemes)
900
return -ENOMEM;
901
err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
902
&context->kobj, "schemes");
903
if (err) {
904
kobject_put(&schemes->kobj);
905
return err;
906
}
907
context->schemes = schemes;
908
return 0;
909
}
910
911
/*
 * Create all subdirectories of a context directory.
 *
 * On failure, subdirectories created so far are put in reverse order and
 * the corresponding pointers reset to NULL.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}
/*
 * Remove all subdirectories of a context directory.
 *
 * For each child, its own subdirectories are removed before the child's
 * kobject is put.
 */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
static ssize_t avail_operations_show(struct kobject *kobj,
948
struct kobj_attribute *attr, char *buf)
949
{
950
int len = 0;
951
int i;
952
953
for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
954
const struct damon_sysfs_ops_name *ops_name;
955
956
ops_name = &damon_sysfs_ops_names[i];
957
if (!damon_is_registered_ops(ops_name->ops_id))
958
continue;
959
len += sysfs_emit_at(buf, len, "%s\n", ops_name->name);
960
}
961
return len;
962
}
963
964
static ssize_t operations_show(struct kobject *kobj,
965
struct kobj_attribute *attr, char *buf)
966
{
967
struct damon_sysfs_context *context = container_of(kobj,
968
struct damon_sysfs_context, kobj);
969
int i;
970
971
for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
972
const struct damon_sysfs_ops_name *ops_name;
973
974
ops_name = &damon_sysfs_ops_names[i];
975
if (ops_name->ops_id == context->ops_id)
976
return sysfs_emit(buf, "%s\n", ops_name->name);
977
}
978
return -EINVAL;
979
}
980
981
/* Set the context's operations set from the user-given name. */
static ssize_t operations_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);
	int i;

	for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
		const struct damon_sysfs_ops_name *entry =
				&damon_sysfs_ops_names[i];

		if (sysfs_streq(buf, entry->name)) {
			context->ops_id = entry->ops_id;
			return count;
		}
	}
	return -EINVAL;
}
static void damon_sysfs_context_release(struct kobject *kobj)
1001
{
1002
kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1003
}
1004
1005
/* read-only 'avail_operations' file of a context directory */
static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

/* 'operations' file of a context directory */
static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

/* files under each context directory */
static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

/* kobject type for context directories */
static const struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
/*
 * contexts directory
 */

/* sysfs directory holding numbered context subdirectories */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;	/* child context objects */
	int nr;						/* number of entries in contexts_arr */
};
static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1035
{
1036
return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1037
}
1038
1039
/*
 * Remove all context directories under 'contexts'.
 *
 * Each context's subdirectories are removed before the context's own
 * kobject is put.
 */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
/*
 * Create 'nr_contexts' numbered context directories under 'contexts'.
 *
 * Existing directories are removed first.  On failure, already created
 * directories are cleaned up; the failing context itself is put separately
 * at the 'out' label because it was never stored in contexts_arr.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		/* new contexts default to the virtual address spaces ops */
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
static ssize_t nr_contexts_show(struct kobject *kobj,
1098
struct kobj_attribute *attr, char *buf)
1099
{
1100
struct damon_sysfs_contexts *contexts = container_of(kobj,
1101
struct damon_sysfs_contexts, kobj);
1102
1103
return sysfs_emit(buf, "%d\n", contexts->nr);
1104
}
1105
1106
/* Re-create the context directories per the user-requested count. */
static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts;
	int nr;
	int err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);

	return err ? err : count;
}
static void damon_sysfs_contexts_release(struct kobject *kobj)
1131
{
1132
kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1133
}
1134
1135
/* 'nr_contexts' file of the contexts directory */
static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

/* files under the contexts directory */
static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

/* kobject type for the contexts directory */
static const struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
/*
 * kdamond directory
 */

/* sysfs representation of one kdamond (DAMON worker thread) */
struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;	/* 'contexts' subdirectory */
	struct damon_ctx *damon_ctx;		/* running DAMON context, if any */
	unsigned int refresh_ms;	/* stats auto-refresh period (ms) — 0 presumably disables; TODO confirm */
};
static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
1162
{
1163
return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
1164
}
1165
1166
/*
 * Create the "contexts" subdirectory of a kdamond directory.  On success,
 * @kdamond->contexts points to the new directory's wrapper.
 */
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (err)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		/* put, not kfree: kobject_init() took a reference */
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	return err;
}
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
1188
{
1189
damon_sysfs_contexts_rm_dirs(kdamond->contexts);
1190
kobject_put(&kdamond->contexts->kobj);
1191
}
1192
1193
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
	 * to DAMON.
	 */
	DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: Update the
	 * effective size quota of the scheme in bytes.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
	 * intervals.
	 */
	DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};
/* Should match with enum damon_sysfs_cmd */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"commit_schemes_quota_goals",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
	"update_schemes_effective_quotas",
	"update_tuned_intervals",
};
/*
 * Show "on" if the kdamond is currently running, "off" otherwise.
 *
 * NOTE(review): kdamond->damon_ctx is read here without damon_sysfs_lock;
 * presumably safe because the context is destroyed only from sysfs paths
 * that hold the lock — confirm.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx = kdamond->damon_ctx;
	bool running;

	if (!ctx)
		running = false;
	else
		running = damon_is_running(ctx);

	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
}
/*
 * Translate the sysfs "attrs" directory inputs into a struct damon_attrs and
 * apply them to @ctx.
 */
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_intervals_goal *sys_goal =
		sys_intervals->intervals_goal;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;
	struct damon_attrs attrs = {
		.sample_interval = sys_intervals->sample_us,
		.aggr_interval = sys_intervals->aggr_us,
		.intervals_goal = {
			.access_bp = sys_goal->access_bp,
			.aggrs = sys_goal->aggrs,
			.min_sample_us = sys_goal->min_sample_us,
			.max_sample_us = sys_goal->max_sample_us},
		.ops_update_interval = sys_intervals->update_us,
		.min_nr_regions = sys_nr_regions->min,
		.max_nr_regions = sys_nr_regions->max,
	};
	return damon_set_attrs(ctx, &attrs);
}
/*
 * Copy the user-specified initial monitoring regions to @t after validating
 * each region is well formed (start <= end) and the regions are given in
 * address order without overlap.  Returns 0 on success, -EINVAL for invalid
 * inputs, or -ENOMEM.
 */
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions)
{
	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	int i, err = -EINVAL;

	if (!ranges)
		return -ENOMEM;
	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];

		if (sys_region->ar.start > sys_region->ar.end)
			goto out;

		ranges[i].start = sys_region->ar.start;
		ranges[i].end = sys_region->ar.end;
		if (i == 0)
			continue;
		/* must be sorted by address and non-overlapping */
		if (ranges[i - 1].end > ranges[i].start)
			goto out;
	}
	err = damon_set_regions(t, ranges, sysfs_regions->nr);
out:
	kfree(ranges);
	return err;

}
/*
 * Wire one sysfs target into @ctx: allocate a damon_target, resolve the
 * monitoring target PID when the operations set works on virtual address
 * spaces, and set the user-specified initial regions.
 */
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();

	if (!t)
		return -ENOMEM;
	damon_add_target(ctx, t);
	if (damon_target_has_pid(ctx)) {
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			/* caller will destroy targets */
			return -EINVAL;
	}
	return damon_sysfs_set_regions(t, sys_target->regions);
}
/*
 * Add every sysfs-described monitoring target to @ctx.  On failure, already
 * added targets are left for the caller to destroy.
 */
static int damon_sysfs_add_targets(struct damon_ctx *ctx,
		struct damon_sysfs_targets *sysfs_targets)
{
	int i, err;

	/* Multiple physical address space monitoring targets makes no sense */
	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
		return -EINVAL;

	for (i = 0; i < sysfs_targets->nr; i++) {
		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];

		err = damon_sysfs_add_target(st, ctx);
		if (err)
			return err;
	}
	return 0;
}
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @data:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and update the
 * related values for sysfs files.  This function should be called from DAMON
 * worker thread, to safely access the DAMON contexts-internal data.  Caller
 * should also ensure holding ``damon_sysfs_lock``, and ->damon_ctx of @data
 * is not NULL but a valid pointer, to safely access DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damon_sysfs_schemes_update_stats(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
static inline bool damon_sysfs_kdamond_running(
1386
struct damon_sysfs_kdamond *kdamond)
1387
{
1388
return kdamond->damon_ctx &&
1389
damon_is_running(kdamond->damon_ctx);
1390
}
1391
1392
/*
 * Apply all sysfs inputs of one context directory (operations set, monitoring
 * attributes, targets and schemes) to @ctx, in dependency order.
 */
static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
		struct damon_sysfs_context *sys_ctx)
{
	int err;

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		return err;
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		return err;
	err = damon_sysfs_add_targets(ctx, sys_ctx->targets);
	if (err)
		return err;
	return damon_sysfs_add_schemes(ctx, sys_ctx->schemes);
}
/* Defined below; forward-declared for damon_sysfs_commit_input(). */
static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx);
/*
1413
* damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
1414
* @kdamond: The kobject wrapper for the associated kdamond.
1415
*
1416
* Returns error if the sysfs input is wrong.
1417
*/
1418
static int damon_sysfs_commit_input(void *data)
1419
{
1420
struct damon_sysfs_kdamond *kdamond = data;
1421
struct damon_ctx *param_ctx, *test_ctx;
1422
int err;
1423
1424
if (!damon_sysfs_kdamond_running(kdamond))
1425
return -EINVAL;
1426
/* TODO: Support multiple contexts per kdamond */
1427
if (kdamond->contexts->nr != 1)
1428
return -EINVAL;
1429
1430
param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
1431
if (IS_ERR(param_ctx))
1432
return PTR_ERR(param_ctx);
1433
test_ctx = damon_new_ctx();
1434
err = damon_commit_ctx(test_ctx, param_ctx);
1435
if (err) {
1436
damon_destroy_ctx(test_ctx);
1437
goto out;
1438
}
1439
err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
1440
out:
1441
damon_destroy_ctx(param_ctx);
1442
return err;
1443
}
1444
1445
/*
 * Commit only the sysfs-specified scheme quota goals to the running DAMON
 * context.  Called from the kdamond worker context via damon_call().
 */
static int damon_sysfs_commit_schemes_quota_goals(void *data)
{
	struct damon_sysfs_kdamond *sysfs_kdamond = data;
	struct damon_ctx *ctx;
	struct damon_sysfs_context *sysfs_ctx;

	if (!damon_sysfs_kdamond_running(sysfs_kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (sysfs_kdamond->contexts->nr != 1)
		return -EINVAL;

	ctx = sysfs_kdamond->damon_ctx;
	sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
	return damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
}
/*
 * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
 * sysfs files.
 * @data:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes' effective quotas of specific kdamond and
 * update the related values for sysfs files.  This function should be called
 * from DAMON callbacks while holding ``damon_sysfs_lock``, to safely access
 * the DAMON contexts-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_effective_quotas(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damos_sysfs_update_effective_quotas(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
/*
 * Copy the (possibly auto-tuned) sampling and aggregation intervals of the
 * running context back into the sysfs input files, so users can read the
 * currently effective values.
 */
static int damon_sysfs_upd_tuned_intervals(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us =
		ctx->attrs.sample_interval;
	kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us =
		ctx->attrs.aggr_interval;
	return 0;
}
static struct damon_ctx *damon_sysfs_build_ctx(
1495
struct damon_sysfs_context *sys_ctx)
1496
{
1497
struct damon_ctx *ctx = damon_new_ctx();
1498
int err;
1499
1500
if (!ctx)
1501
return ERR_PTR(-ENOMEM);
1502
1503
err = damon_sysfs_apply_inputs(ctx, sys_ctx);
1504
if (err) {
1505
damon_destroy_ctx(ctx);
1506
return ERR_PTR(err);
1507
}
1508
1509
return ctx;
1510
}
1511
1512
/*
 * Repeatedly invoked from the kdamond worker context to refresh sysfs result
 * files (tuned intervals, scheme stats, effective quotas) every refresh_ms
 * milliseconds.  Always returns 0 so the call keeps repeating.
 *
 * NOTE(review): next_update_jiffies is one function-local static shared by
 * every kdamond; with multiple kdamonds using refresh_ms, one kdamond's
 * update pushes back the others' — confirm whether per-kdamond state is
 * intended here.
 */
static int damon_sysfs_repeat_call_fn(void *data)
{
	struct damon_sysfs_kdamond *sysfs_kdamond = data;
	static unsigned long next_update_jiffies;

	if (!sysfs_kdamond->refresh_ms)
		return 0;
	if (time_before(jiffies, next_update_jiffies))
		return 0;
	next_update_jiffies = jiffies +
		msecs_to_jiffies(sysfs_kdamond->refresh_ms);

	/* somebody else holds the lock; just skip this round */
	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	damon_sysfs_upd_tuned_intervals(sysfs_kdamond);
	damon_sysfs_upd_schemes_stats(sysfs_kdamond);
	damon_sysfs_upd_schemes_effective_quotas(sysfs_kdamond);
	mutex_unlock(&damon_sysfs_lock);
	return 0;
}
/* repeated damon_call() handle for the periodic sysfs files refresh */
static struct damon_call_control damon_sysfs_repeat_call_control = {
	.fn = damon_sysfs_repeat_call_fn,
	.repeat = true,
};
/*
 * Start DAMON for the kdamond.  A context kept from a previous run (for
 * showing final results) is destroyed first, then a new context is built
 * from the current sysfs inputs and started.  Also registers the repeated
 * call for the periodic sysfs files refresh.
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* free the context that was kept for reading final results */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;

	damon_sysfs_repeat_call_control.data = kdamond;
	damon_call(ctx, &damon_sysfs_repeat_call_control);
	return err;
}
/* Stop the kdamond's DAMON context; -EINVAL if it was never turned on. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
}
/*
 * Run @fn once from the kdamond worker context via damon_call(), passing
 * @kdamond as its argument, so @fn can safely touch DAMON-internal data.
 * Returns -EINVAL if the kdamond has no context.
 */
static int damon_sysfs_damon_call(int (*fn)(void *data),
		struct damon_sysfs_kdamond *kdamond)
{
	struct damon_call_control call_control = {};

	if (!kdamond->damon_ctx)
		return -EINVAL;
	call_control.fn = fn;
	call_control.data = kdamond;
	return damon_call(kdamond->damon_ctx, &call_control);
}
/* argument bundle for the damos_walk() based tried-regions update */
struct damon_sysfs_schemes_walk_data {
	struct damon_sysfs_kdamond *sysfs_kdamond;
	/* update only tried_regions/total_bytes, not per-region dirs */
	bool total_bytes_only;
};
/* populate the region directory; damos_walk() callback for one region */
static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s, unsigned long sz_filter_passed)
{
	struct damon_sysfs_schemes_walk_data *walk_data = data;
	struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond;

	damos_sysfs_populate_region_dir(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes,
			ctx, t, r, s, walk_data->total_bytes_only,
			sz_filter_passed);
}
/*
 * Rebuild the tried_regions sysfs files for every scheme of the kdamond by
 * clearing the current region dirs and walking the schemes' applied regions.
 * @total_bytes_only limits the update to the total_bytes files.
 */
static int damon_sysfs_update_schemes_tried_regions(
		struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only)
{
	struct damon_sysfs_schemes_walk_data walk_data = {
		.sysfs_kdamond = sysfs_kdamond,
		.total_bytes_only = total_bytes_only,
	};
	struct damos_walk_control control = {
		.walk_fn = damon_sysfs_schemes_tried_regions_upd_one,
		.data = &walk_data,
	};
	struct damon_ctx *ctx = sysfs_kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;

	damon_sysfs_schemes_clear_regions(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes);
	return damos_walk(ctx, &control);
}
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  Caller holds
 * damon_sysfs_lock.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	case DAMON_SYSFS_CMD_COMMIT:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_input, kdamond);
	case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_schemes_quota_goals,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_stats, kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
		return damon_sysfs_update_schemes_tried_regions(kdamond, true);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_update_schemes_tried_regions(kdamond, false);
	case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_schemes_clear_regions(
				kdamond->contexts->contexts_arr[0]->schemes);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_effective_quotas,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_tuned_intervals, kdamond);
	default:
		return -EINVAL;
	}
}
/*
 * Parse a command string written to the "state" file and execute it under
 * damon_sysfs_lock.  Unknown strings get -EINVAL; success returns @count.
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	enum damon_sysfs_cmd cmd;
	ssize_t ret = -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
			ret = damon_sysfs_handle_cmd(cmd, kdamond);
			break;
		}
	}
	mutex_unlock(&damon_sysfs_lock);
	if (!ret)
		ret = count;
	return ret;
}
/*
 * Show the PID of the kdamond worker thread, or -1 when the kdamond is not
 * running.  ctx->kdamond_lock guards the thread pointer against concurrent
 * kdamond exit.
 */
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid = -1;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx)
		goto out;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}
static ssize_t refresh_ms_show(struct kobject *kobj,
1724
struct kobj_attribute *attr, char *buf)
1725
{
1726
struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1727
struct damon_sysfs_kdamond, kobj);
1728
1729
return sysfs_emit(buf, "%u\n", kdamond->refresh_ms);
1730
}
1731
1732
/* Set the auto-refresh interval in ms; zero disables the periodic refresh. */
static ssize_t refresh_ms_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond;
	unsigned int ms;
	int err;

	err = kstrtouint(buf, 0, &ms);
	if (err)
		return err;

	kdamond = container_of(kobj, struct damon_sysfs_kdamond, kobj);
	kdamond->refresh_ms = ms;
	return count;
}
/*
 * kobject release callback: destroy the kept DAMON context (if any) and free
 * the kdamond directory wrapper.
 */
static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct kobj_attribute damon_sysfs_kdamond_refresh_ms_attr =
		__ATTR_RW_MODE(refresh_ms, 0600);

/* sysfs files of a kdamond directory */
static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	&damon_sysfs_kdamond_refresh_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static const struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
/*
 * kdamonds directory
 */

struct damon_sysfs_kdamonds {
	struct kobject kobj;
	/* array of nr kdamond directory wrappers */
	struct damon_sysfs_kdamond **kdamonds_arr;
	int nr;
};
static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
1791
{
1792
return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
1793
}
1794
1795
/*
 * Remove all kdamond directories and free the pointer array.  Safe to call
 * with an empty/NULL array (nr == 0).
 */
static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
	int i;

	for (i = 0; i < kdamonds->nr; i++) {
		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
		kobject_put(&kdamonds_arr[i]->kobj);
	}
	kdamonds->nr = 0;
	kfree(kdamonds_arr);
	kdamonds->kdamonds_arr = NULL;
}
/* Return whether any of the first @nr_kdamonds kdamonds is running. */
static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
		int nr_kdamonds)
{
	int i;

	for (i = 0; i < nr_kdamonds; i++) {
		if (damon_sysfs_kdamond_running(kdamonds[i]))
			return true;
	}

	return false;
}
/*
 * Replace the kdamond directories with @nr_kdamonds fresh ones, named "0" to
 * "nr_kdamonds - 1".  Fails with -EBUSY if any existing kdamond is running.
 * On mid-way failure, already created directories are removed.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		/* publish only after the directory is fully built */
		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/* the failing kdamond is not in the array yet; put it separately */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
static ssize_t nr_kdamonds_show(struct kobject *kobj,
1870
struct kobj_attribute *attr, char *buf)
1871
{
1872
struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
1873
struct damon_sysfs_kdamonds, kobj);
1874
1875
return sysfs_emit(buf, "%d\n", kdamonds->nr);
1876
}
1877
1878
/*
 * Set the number of kdamond directories.  Rejects negative values; existing
 * directories are replaced under damon_sysfs_lock.
 */
static ssize_t nr_kdamonds_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);

	/* trylock so a blocked writer can be interrupted; ask user to retry */
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
/* kobject release callback: free the "kdamonds" directory wrapper. */
static void damon_sysfs_kdamonds_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
}
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

/* sysfs files of the "kdamonds" directory */
static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static const struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
/*
 * damon user interface directory
 */

struct damon_sysfs_ui_dir {
	struct kobject kobj;
	/* the "kdamonds" subdirectory */
	struct damon_sysfs_kdamonds *kdamonds;
};
static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
1932
{
1933
return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
1934
}
1935
1936
/*
 * Create the "kdamonds" subdirectory under the user interface directory.
 * On success, @ui_dir->kdamonds points to the new directory's wrapper.
 */
static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int err;

	kdamonds = damon_sysfs_kdamonds_alloc();
	if (!kdamonds)
		return -ENOMEM;

	err = kobject_init_and_add(&kdamonds->kobj,
			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
			"kdamonds");
	if (err) {
		/* put, not kfree: kobject_init() took a reference */
		kobject_put(&kdamonds->kobj);
		return err;
	}
	ui_dir->kdamonds = kdamonds;
	return err;
}
/* kobject release callback: free the user interface directory wrapper. */
static void damon_sysfs_ui_dir_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
}
/* the user interface directory has no files of its own */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static const struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
/* Build /sys/kernel/mm/damon/admin/kdamonds at boot. */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	/* /sys/kernel/mm/damon */
	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	/* /sys/kernel/mm/damon/admin */
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
#include "tests/sysfs-kunit.h"
2004
2005