Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/mm/damon/sysfs.c
49611 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* DAMON sysfs Interface
4
*
5
* Copyright (c) 2022 SeongJae Park <[email protected]>
6
*/
7
8
#include <linux/pid.h>
9
#include <linux/sched.h>
10
#include <linux/slab.h>
11
12
#include "sysfs-common.h"
13
14
/*
15
* init region directory
16
*/
17
18
/*
 * One "init region" directory: a user-specified monitoring-start address
 * range.  kobj must stay the first member so container_of() round-trips
 * used by the attribute callbacks and the release handler work.
 */
struct damon_sysfs_region {
	struct kobject kobj;
	struct damon_addr_range ar;	/* [start, end) address range */
};
22
23
static struct damon_sysfs_region *damon_sysfs_region_alloc(void)
24
{
25
return kzalloc(sizeof(struct damon_sysfs_region), GFP_KERNEL);
26
}
27
28
static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
29
char *buf)
30
{
31
struct damon_sysfs_region *region = container_of(kobj,
32
struct damon_sysfs_region, kobj);
33
34
return sysfs_emit(buf, "%lu\n", region->ar.start);
35
}
36
37
/* Parse and store a new start address for this init region. */
static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *sysfs_region;
	int err;

	sysfs_region = container_of(kobj, struct damon_sysfs_region, kobj);
	err = kstrtoul(buf, 0, &sysfs_region->ar.start);
	if (err)
		return err;
	return count;
}
46
47
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
48
char *buf)
49
{
50
struct damon_sysfs_region *region = container_of(kobj,
51
struct damon_sysfs_region, kobj);
52
53
return sysfs_emit(buf, "%lu\n", region->ar.end);
54
}
55
56
/* Parse and store a new end address for this init region. */
static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *sysfs_region;
	int err;

	sysfs_region = container_of(kobj, struct damon_sysfs_region, kobj);
	err = kstrtoul(buf, 0, &sysfs_region->ar.end);
	if (err)
		return err;
	return count;
}
65
66
static void damon_sysfs_region_release(struct kobject *kobj)
67
{
68
kfree(container_of(kobj, struct damon_sysfs_region, kobj));
69
}
70
71
/* Per-region files: "start" and "end", root-only read/write. */
static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

/* kobj_type of region directories; release() frees the backing struct. */
static const struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
89
90
/*
91
* init_regions directory
92
*/
93
94
/*
 * The "regions" directory: an array of numbered init-region child
 * directories.  nr counts only fully-constructed children, so the
 * teardown path can safely put exactly that many kobjects.
 */
struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;	/* nr valid entries */
	int nr;
};
99
100
static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
101
{
102
return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
103
}
104
105
/*
 * Remove all child region directories and free the pointer array.
 * Each kobject_put() drops the last reference taken by
 * kobject_init_and_add(), which removes the sysfs dir and (via the
 * ktype release) frees the region struct.  Safe to call when empty.
 */
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
116
117
/*
 * (Re)create nr_regions numbered child directories under 'regions'.
 * Existing children are removed first, so writing a new count replaces
 * the old set.  Returns 0 on success or a negative errno; on failure
 * all partially-created children are cleaned up.
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	/* __GFP_NOWARN: nr_regions is user-controlled; don't log OOM splat */
	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc();
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			/*
			 * After init_and_add() failure the kobject is
			 * initialized, so put it (frees 'region') in
			 * addition to removing the earlier children.
			 */
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		/* only count fully-constructed children (see rm_dirs) */
		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
154
155
static ssize_t nr_regions_show(struct kobject *kobj,
156
struct kobj_attribute *attr, char *buf)
157
{
158
struct damon_sysfs_regions *regions = container_of(kobj,
159
struct damon_sysfs_regions, kobj);
160
161
return sysfs_emit(buf, "%d\n", regions->nr);
162
}
163
164
/*
 * Set the number of init-region child directories.  Rebuilds the whole
 * set under damon_sysfs_lock; returns -EBUSY instead of sleeping when
 * the lock is contended (e.g. a kdamond state change is in flight).
 */
static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	regions = container_of(kobj, struct damon_sysfs_regions, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
186
187
static void damon_sysfs_regions_release(struct kobject *kobj)
188
{
189
kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
190
}
191
192
/* The "regions" directory exposes one file: "nr_regions". */
static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

/* kobj_type of the "regions" directory. */
static const struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
206
207
/*
208
* target directory
209
*/
210
211
/*
 * One monitoring target directory: the pid to monitor (for vaddr-family
 * ops) plus its "regions" subdirectory of initial monitoring regions.
 */
struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;	/* "regions" child dir */
	int pid;		/* target process id (unused for paddr ops) */
	bool obsolete;		/* user-set mark; shown as Y/N */
};
217
218
static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
219
{
220
return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
221
}
222
223
/*
 * Create the "regions" subdirectory of a target directory.
 * On kobject_init_and_add() failure the put drops the initialized
 * kobject's reference, which frees 'regions' via the ktype release.
 * Returns 0 on success, negative errno otherwise.
 */
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}
239
240
/*
 * Tear down a target's subtree: first remove the region children, then
 * drop the "regions" directory itself.  Order matters — the children
 * must go before their parent's last reference is put.
 */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
245
246
static ssize_t pid_target_show(struct kobject *kobj,
247
struct kobj_attribute *attr, char *buf)
248
{
249
struct damon_sysfs_target *target = container_of(kobj,
250
struct damon_sysfs_target, kobj);
251
252
return sysfs_emit(buf, "%d\n", target->pid);
253
}
254
255
/*
 * Parse and store the pid of this monitoring target.
 *
 * Fix: propagate the kstrtoint() error code instead of flattening every
 * failure to -EINVAL — an out-of-range input now reports -ERANGE, and
 * the behavior matches every other *_store handler in this file.
 */
static ssize_t pid_target_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);
	int err = kstrtoint(buf, 0, &target->pid);

	if (err)
		return err;
	return count;
}
266
267
static ssize_t obsolete_target_show(struct kobject *kobj,
268
struct kobj_attribute *attr, char *buf)
269
{
270
struct damon_sysfs_target *target = container_of(kobj,
271
struct damon_sysfs_target, kobj);
272
273
return sysfs_emit(buf, "%c\n", target->obsolete ? 'Y' : 'N');
274
}
275
276
/* Parse a boolean token (kstrtobool syntax) into the obsolete mark. */
static ssize_t obsolete_target_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_target *target;
	bool input;
	int err;

	target = container_of(kobj, struct damon_sysfs_target, kobj);
	err = kstrtobool(buf, &input);
	if (err)
		return err;
	target->obsolete = input;
	return count;
}
289
290
static void damon_sysfs_target_release(struct kobject *kobj)
291
{
292
kfree(container_of(kobj, struct damon_sysfs_target, kobj));
293
}
294
295
/* Per-target files: "pid_target" and "obsolete_target". */
static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

static struct kobj_attribute damon_sysfs_target_obsolete_attr =
		__ATTR_RW_MODE(obsolete_target, 0600);

static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	&damon_sysfs_target_obsolete_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

/* kobj_type of target directories. */
static const struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
313
314
/*
315
* targets directory
316
*/
317
318
/*
 * The "targets" directory: an array of numbered target child
 * directories.  nr counts only fully-constructed children.
 */
struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;	/* nr valid entries */
	int nr;
};
323
324
static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
325
{
326
return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
327
}
328
329
/*
 * Remove all target child directories.  Each child's own subtree
 * ("regions") is removed before the child kobject is put, since only
 * fully-constructed children (with regions dir) are counted in nr.
 */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
342
343
/*
 * (Re)create nr_targets numbered target child directories, each with a
 * "regions" subdirectory.  On failure, fully-constructed earlier
 * children are removed via rm_dirs and the current, partially-built
 * child is dropped with a direct kobject_put (it is not yet in
 * targets_arr, so rm_dirs cannot see it).  Returns 0 or -errno.
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	/* __GFP_NOWARN: nr_targets is user-controlled; don't log OOM splat */
	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
386
387
static ssize_t nr_targets_show(struct kobject *kobj,
388
struct kobj_attribute *attr, char *buf)
389
{
390
struct damon_sysfs_targets *targets = container_of(kobj,
391
struct damon_sysfs_targets, kobj);
392
393
return sysfs_emit(buf, "%d\n", targets->nr);
394
}
395
396
/*
 * Set the number of target child directories.  Rebuilds the whole set
 * under damon_sysfs_lock; returns -EBUSY instead of sleeping when the
 * lock is contended.
 */
static ssize_t nr_targets_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_targets *targets;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	targets = container_of(kobj, struct damon_sysfs_targets, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_targets_add_dirs(targets, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
418
419
static void damon_sysfs_targets_release(struct kobject *kobj)
420
{
421
kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
422
}
423
424
/* The "targets" directory exposes one file: "nr_targets". */
static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

/* kobj_type of the "targets" directory. */
static const struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
438
439
/*
440
* intervals goal directory
441
*/
442
443
/*
 * The "intervals_goal" directory: user inputs for automatic sampling/
 * aggregation intervals tuning.  All values are plain unsigned longs as
 * written by the user; interpretation happens elsewhere.
 */
struct damon_sysfs_intervals_goal {
	struct kobject kobj;
	unsigned long access_bp;	/* target access ratio, bp */
	unsigned long aggrs;		/* number of aggregations to aim */
	unsigned long min_sample_us;	/* lower bound for sampling interval */
	unsigned long max_sample_us;	/* upper bound for sampling interval */
};
450
451
static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc(
452
unsigned long access_bp, unsigned long aggrs,
453
unsigned long min_sample_us, unsigned long max_sample_us)
454
{
455
struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal),
456
GFP_KERNEL);
457
458
if (!goal)
459
return NULL;
460
461
goal->kobj = (struct kobject){};
462
goal->access_bp = access_bp;
463
goal->aggrs = aggrs;
464
goal->min_sample_us = min_sample_us;
465
goal->max_sample_us = max_sample_us;
466
return goal;
467
}
468
469
static ssize_t access_bp_show(struct kobject *kobj,
470
struct kobj_attribute *attr, char *buf)
471
{
472
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
473
struct damon_sysfs_intervals_goal, kobj);
474
475
return sysfs_emit(buf, "%lu\n", goal->access_bp);
476
}
477
478
/* Parse and store the access ratio goal (basis points). */
static ssize_t access_bp_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long input;
	int err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	goal->access_bp = input;
	return count;
}
492
493
static ssize_t aggrs_show(struct kobject *kobj,
494
struct kobj_attribute *attr, char *buf)
495
{
496
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
497
struct damon_sysfs_intervals_goal, kobj);
498
499
return sysfs_emit(buf, "%lu\n", goal->aggrs);
500
}
501
502
/* Parse and store the aggregations goal. */
static ssize_t aggrs_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long input;
	int err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	goal->aggrs = input;
	return count;
}
516
517
static ssize_t min_sample_us_show(struct kobject *kobj,
518
struct kobj_attribute *attr, char *buf)
519
{
520
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
521
struct damon_sysfs_intervals_goal, kobj);
522
523
return sysfs_emit(buf, "%lu\n", goal->min_sample_us);
524
}
525
526
/* Parse and store the minimum sampling interval bound (microseconds). */
static ssize_t min_sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long input;
	int err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	goal->min_sample_us = input;
	return count;
}
540
541
static ssize_t max_sample_us_show(struct kobject *kobj,
542
struct kobj_attribute *attr, char *buf)
543
{
544
struct damon_sysfs_intervals_goal *goal = container_of(kobj,
545
struct damon_sysfs_intervals_goal, kobj);
546
547
return sysfs_emit(buf, "%lu\n", goal->max_sample_us);
548
}
549
550
/* Parse and store the maximum sampling interval bound (microseconds). */
static ssize_t max_sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals_goal *goal;
	unsigned long input;
	int err;

	goal = container_of(kobj, struct damon_sysfs_intervals_goal, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	goal->max_sample_us = input;
	return count;
}
564
565
static void damon_sysfs_intervals_goal_release(struct kobject *kobj)
566
{
567
kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj));
568
}
569
570
/* Files of the "intervals_goal" directory. */
static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr =
		__ATTR_RW_MODE(access_bp, 0600);

static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr =
		__ATTR_RW_MODE(aggrs, 0600);

static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr =
		__ATTR_RW_MODE(min_sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr =
		__ATTR_RW_MODE(max_sample_us, 0600);

static struct attribute *damon_sysfs_intervals_goal_attrs[] = {
	&damon_sysfs_intervals_goal_access_bp_attr.attr,
	&damon_sysfs_intervals_goal_aggrs_attr.attr,
	&damon_sysfs_intervals_goal_min_sample_us_attr.attr,
	&damon_sysfs_intervals_goal_max_sample_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal);

/* kobj_type of the "intervals_goal" directory. */
static const struct kobj_type damon_sysfs_intervals_goal_ktype = {
	.release = damon_sysfs_intervals_goal_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_goal_groups,
};
596
597
/*
598
* intervals directory
599
*/
600
601
/*
 * The "intervals" directory: monitoring intervals in microseconds, plus
 * the "intervals_goal" child directory for auto-tuning inputs.
 */
struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;	/* sampling interval */
	unsigned long aggr_us;		/* aggregation interval */
	unsigned long update_us;	/* ops update interval */
	struct damon_sysfs_intervals_goal *intervals_goal;	/* child dir */
};
608
609
static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
610
unsigned long sample_us, unsigned long aggr_us,
611
unsigned long update_us)
612
{
613
struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
614
GFP_KERNEL);
615
616
if (!intervals)
617
return NULL;
618
619
intervals->kobj = (struct kobject){};
620
intervals->sample_us = sample_us;
621
intervals->aggr_us = aggr_us;
622
intervals->update_us = update_us;
623
return intervals;
624
}
625
626
/*
 * Create the "intervals_goal" child directory, initialized to all-zero
 * goal values.  On failure the half-built kobject is put (freeing the
 * goal struct via the ktype release) and ->intervals_goal is cleared.
 * Returns 0 or negative errno.
 */
static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals)
{
	struct damon_sysfs_intervals_goal *goal;
	int err;

	goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0);
	if (!goal)
		return -ENOMEM;

	err = kobject_init_and_add(&goal->kobj,
			&damon_sysfs_intervals_goal_ktype, &intervals->kobj,
			"intervals_goal");
	if (err) {
		kobject_put(&goal->kobj);
		intervals->intervals_goal = NULL;
		return err;
	}
	intervals->intervals_goal = goal;
	return 0;
}
646
647
/*
 * Drop the "intervals_goal" child directory.  Only valid after a
 * successful damon_sysfs_intervals_add_dirs() (->intervals_goal set).
 */
static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals)
{
	kobject_put(&intervals->intervals_goal->kobj);
}
651
652
static ssize_t sample_us_show(struct kobject *kobj,
653
struct kobj_attribute *attr, char *buf)
654
{
655
struct damon_sysfs_intervals *intervals = container_of(kobj,
656
struct damon_sysfs_intervals, kobj);
657
658
return sysfs_emit(buf, "%lu\n", intervals->sample_us);
659
}
660
661
/* Parse and store the sampling interval (microseconds). */
static ssize_t sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals;
	unsigned long input;
	int err;

	intervals = container_of(kobj, struct damon_sysfs_intervals, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	intervals->sample_us = input;
	return count;
}
675
676
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
677
char *buf)
678
{
679
struct damon_sysfs_intervals *intervals = container_of(kobj,
680
struct damon_sysfs_intervals, kobj);
681
682
return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
683
}
684
685
/* Parse and store the aggregation interval (microseconds). */
static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals;
	unsigned long input;
	int err;

	intervals = container_of(kobj, struct damon_sysfs_intervals, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	intervals->aggr_us = input;
	return count;
}
699
700
static ssize_t update_us_show(struct kobject *kobj,
701
struct kobj_attribute *attr, char *buf)
702
{
703
struct damon_sysfs_intervals *intervals = container_of(kobj,
704
struct damon_sysfs_intervals, kobj);
705
706
return sysfs_emit(buf, "%lu\n", intervals->update_us);
707
}
708
709
/* Parse and store the ops update interval (microseconds). */
static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals;
	unsigned long input;
	int err;

	intervals = container_of(kobj, struct damon_sysfs_intervals, kobj);
	err = kstrtoul(buf, 0, &input);
	if (err)
		return err;
	intervals->update_us = input;
	return count;
}
723
724
static void damon_sysfs_intervals_release(struct kobject *kobj)
725
{
726
kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
727
}
728
729
/* Files of the "intervals" directory. */
static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

/* kobj_type of the "intervals" directory. */
static const struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
751
752
/*
753
* monitoring_attrs directory
754
*/
755
756
/*
 * The "monitoring_attrs" directory: holds the "intervals" and
 * "nr_regions" child directories.  Itself exposes no files.
 */
struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;	/* "intervals" dir */
	struct damon_sysfs_ul_range *nr_regions_range;	/* "nr_regions" dir */
};
761
762
static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
763
{
764
struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
765
766
if (!attrs)
767
return NULL;
768
attrs->kobj = (struct kobject){};
769
return attrs;
770
}
771
772
/*
 * Populate "monitoring_attrs" with its two child directories:
 * "intervals" (default 5ms sampling / 100ms aggregation / 60s update)
 * and "nr_regions" (default [10, 1000] range).  Uses goto-label cleanup
 * in reverse construction order; returns 0 or negative errno with no
 * children left behind on failure.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	err = damon_sysfs_intervals_add_dirs(intervals);
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto rmdir_put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
rmdir_put_intervals_out:
	damon_sysfs_intervals_rm_dirs(intervals);
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
816
817
/*
 * Tear down "monitoring_attrs" children in reverse construction order:
 * "nr_regions" first, then the "intervals" subtree bottom-up.
 */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	damon_sysfs_intervals_rm_dirs(attrs->intervals);
	kobject_put(&attrs->intervals->kobj);
}
823
824
static void damon_sysfs_attrs_release(struct kobject *kobj)
825
{
826
kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
827
}
828
829
/* "monitoring_attrs" has no files of its own, only child directories. */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

/* kobj_type of the "monitoring_attrs" directory. */
static const struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
839
840
/*
841
* context directory
842
*/
843
844
/* Mapping between a DAMON operations set id and its sysfs name. */
struct damon_sysfs_ops_name {
	enum damon_ops_id ops_id;
	char *name;
};
848
849
/* All operations sets selectable via the "operations" file. */
static const struct damon_sysfs_ops_name damon_sysfs_ops_names[] = {
	{
		.ops_id = DAMON_OPS_VADDR,
		.name = "vaddr",
	},
	{
		.ops_id = DAMON_OPS_FVADDR,
		.name = "fvaddr",
	},
	{
		.ops_id = DAMON_OPS_PADDR,
		.name = "paddr",
	},
};
863
864
/*
 * One DAMON context directory: the selected operations set, the address
 * unit, and child dirs for monitoring attrs, targets, and schemes.
 */
struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;	/* selected operations set */
	unsigned long addr_unit;	/* bytes per address unit; never 0 */
	struct damon_sysfs_attrs *attrs;	/* "monitoring_attrs" dir */
	struct damon_sysfs_targets *targets;	/* "targets" dir */
	struct damon_sysfs_schemes *schemes;	/* "schemes" dir */
};
872
873
static struct damon_sysfs_context *damon_sysfs_context_alloc(
874
enum damon_ops_id ops_id)
875
{
876
struct damon_sysfs_context *context = kmalloc(sizeof(*context),
877
GFP_KERNEL);
878
879
if (!context)
880
return NULL;
881
context->kobj = (struct kobject){};
882
context->ops_id = ops_id;
883
context->addr_unit = 1;
884
return context;
885
}
886
887
/*
 * Create and populate the "monitoring_attrs" child of a context.
 * On failure of either the kobject add or the child population, the
 * single kobject_put releases everything built so far for it.
 * Returns 0 or negative errno.
 */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}
908
909
/*
 * Create the (initially empty) "targets" child of a context.
 * Returns 0 or negative errno; on failure nothing is left behind.
 */
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}
925
926
/*
 * Create the (initially empty) "schemes" child of a context.
 * Returns 0 or negative errno; on failure nothing is left behind.
 */
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
942
943
/*
 * Populate a context directory with its three children:
 * monitoring_attrs, targets, and schemes.  Cleanup labels unwind in
 * reverse construction order.  Returns 0 or negative errno.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto rmdir_put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
rmdir_put_attrs_out:
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}
969
970
/*
 * Tear down a context's whole subtree: each child's own descendants
 * are removed before the child's last kobject reference is dropped.
 */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
979
980
static ssize_t avail_operations_show(struct kobject *kobj,
981
struct kobj_attribute *attr, char *buf)
982
{
983
int len = 0;
984
int i;
985
986
for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
987
const struct damon_sysfs_ops_name *ops_name;
988
989
ops_name = &damon_sysfs_ops_names[i];
990
if (!damon_is_registered_ops(ops_name->ops_id))
991
continue;
992
len += sysfs_emit_at(buf, len, "%s\n", ops_name->name);
993
}
994
return len;
995
}
996
997
static ssize_t operations_show(struct kobject *kobj,
998
struct kobj_attribute *attr, char *buf)
999
{
1000
struct damon_sysfs_context *context = container_of(kobj,
1001
struct damon_sysfs_context, kobj);
1002
int i;
1003
1004
for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
1005
const struct damon_sysfs_ops_name *ops_name;
1006
1007
ops_name = &damon_sysfs_ops_names[i];
1008
if (ops_name->ops_id == context->ops_id)
1009
return sysfs_emit(buf, "%s\n", ops_name->name);
1010
}
1011
return -EINVAL;
1012
}
1013
1014
/* Select this context's operations set by name; -EINVAL if unknown. */
static ssize_t operations_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(damon_sysfs_ops_names); idx++) {
		if (!sysfs_streq(buf, damon_sysfs_ops_names[idx].name))
			continue;
		context->ops_id = damon_sysfs_ops_names[idx].ops_id;
		return count;
	}
	return -EINVAL;
}
1032
1033
static ssize_t addr_unit_show(struct kobject *kobj,
1034
struct kobj_attribute *attr, char *buf)
1035
{
1036
struct damon_sysfs_context *context = container_of(kobj,
1037
struct damon_sysfs_context, kobj);
1038
1039
return sysfs_emit(buf, "%lu\n", context->addr_unit);
1040
}
1041
1042
/* Parse and store the address unit; zero is rejected with -EINVAL. */
static ssize_t addr_unit_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_context *context;
	unsigned long new_unit;
	int err;

	context = container_of(kobj, struct damon_sysfs_context, kobj);
	err = kstrtoul(buf, 0, &new_unit);
	if (err)
		return err;
	if (!new_unit)
		return -EINVAL;

	context->addr_unit = new_unit;
	return count;
}
1058
1059
static void damon_sysfs_context_release(struct kobject *kobj)
1060
{
1061
kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1062
}
1063
1064
/* Files of a context directory; avail_operations is read-only. */
static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct kobj_attribute damon_sysfs_context_addr_unit_attr =
		__ATTR_RW_MODE(addr_unit, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	&damon_sysfs_context_addr_unit_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

/* kobj_type of context directories. */
static const struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
1086
1087
/*
1088
* contexts directory
1089
*/
1090
1091
/*
 * The "contexts" directory: an array of numbered context child
 * directories.  nr counts only fully-constructed children.
 */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;	/* nr valid entries */
	int nr;
};
1096
1097
static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1098
{
1099
return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1100
}
1101
1102
/*
 * Remove all context child directories.  Each child's subtree is torn
 * down before the child kobject itself is put.
 */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
1115
1116
/*
 * (Re)create nr_contexts numbered context child directories, each fully
 * populated and defaulting to the vaddr operations set.  On failure,
 * earlier children are removed via rm_dirs and the current half-built
 * child (not yet in contexts_arr) is dropped with a direct kobject_put.
 * Returns 0 or negative errno.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	/* __GFP_NOWARN: nr_contexts is user-controlled; don't log OOM splat */
	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
1159
1160
static ssize_t nr_contexts_show(struct kobject *kobj,
1161
struct kobj_attribute *attr, char *buf)
1162
{
1163
struct damon_sysfs_contexts *contexts = container_of(kobj,
1164
struct damon_sysfs_contexts, kobj);
1165
1166
return sysfs_emit(buf, "%d\n", contexts->nr);
1167
}
1168
1169
/*
 * Handle writes to the "nr_contexts" file: (re)create that many context
 * child directories.  Returns -EBUSY instead of blocking if the global
 * DAMON sysfs lock is contended.
 */
static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1192
1193
static void damon_sysfs_contexts_release(struct kobject *kobj)
1194
{
1195
kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1196
}
1197
1198
/* "nr_contexts" file: root-only read/write. */
static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

/* kobj_type for the "contexts" directory. */
static const struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
1212
1213
/*
1214
* kdamond directory
1215
*/
1216
1217
/*
 * Wrapper for a single numbered kdamond directory.
 */
struct damon_sysfs_kdamond {
	struct kobject kobj;			/* the kdamond directory */
	struct damon_sysfs_contexts *contexts;	/* "contexts" child directory */
	struct damon_ctx *damon_ctx;		/* running (or last) DAMON context */
	unsigned int refresh_ms;		/* auto stats refresh period; 0 = off */
};
1223
1224
static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
1225
{
1226
return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
1227
}
1228
1229
/*
 * Populate a kdamond directory with its "contexts" child directory.
 * On failure the partially-initialized child is released and a negative
 * error code is returned.
 */
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (!contexts)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	return err;
}
1249
1250
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
1251
{
1252
damon_sysfs_contexts_rm_dirs(kdamond->contexts);
1253
kobject_put(&kdamond->contexts->kobj);
1254
}
1255
1256
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
	 * to DAMON.
	 */
	DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: Update the
	 * effective size quota of the scheme in bytes.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
	 * intervals.
	 */
	DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};
1306
1307
/*
 * Keyword accepted by the "state" file for each command.
 * Should match with enum damon_sysfs_cmd, index for index.
 */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"commit_schemes_quota_goals",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
	"update_schemes_effective_quotas",
	"update_tuned_intervals",
};
1320
1321
/*
 * Show "on" or "off" for the kdamond "state" file, depending on whether
 * the associated DAMON context is currently running.  Returns -EBUSY
 * instead of blocking on the global lock.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	bool running = false;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;

	/* damon_ctx may be NULL if the kdamond was never started */
	ctx = kdamond->damon_ctx;
	if (ctx)
		running = damon_is_running(ctx);

	mutex_unlock(&damon_sysfs_lock);

	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
}
1342
1343
/*
 * Translate the sysfs attrs directory contents into a struct damon_attrs
 * and apply it to @ctx via damon_set_attrs().
 */
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_intervals_goal *sys_goal =
		sys_intervals->intervals_goal;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;
	struct damon_attrs attrs = {
		.sample_interval = sys_intervals->sample_us,
		.aggr_interval = sys_intervals->aggr_us,
		.intervals_goal = {
			.access_bp = sys_goal->access_bp,
			.aggrs = sys_goal->aggrs,
			.min_sample_us = sys_goal->min_sample_us,
			.max_sample_us = sys_goal->max_sample_us},
		.ops_update_interval = sys_intervals->update_us,
		.min_nr_regions = sys_nr_regions->min,
		.max_nr_regions = sys_nr_regions->max,
	};
	return damon_set_attrs(ctx, &attrs);
}
1365
1366
/*
 * Apply the user-specified initial monitoring regions to target @t.
 * Rejects (-EINVAL) any region whose start is after its end, and any
 * pair of consecutive regions that overlap or are unsorted.
 */
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions,
		unsigned long min_sz_region)
{
	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	int i, err = -EINVAL;

	if (!ranges)
		return -ENOMEM;
	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];

		if (sys_region->ar.start > sys_region->ar.end)
			goto out;

		ranges[i].start = sys_region->ar.start;
		ranges[i].end = sys_region->ar.end;
		if (i == 0)
			continue;
		/* regions must be sorted and non-overlapping */
		if (ranges[i - 1].end > ranges[i].start)
			goto out;
	}
	err = damon_set_regions(t, ranges, sysfs_regions->nr, min_sz_region);
out:
	kfree(ranges);
	return err;

}
1396
1397
/*
 * Create a new monitoring target from its sysfs wrapper and add it to
 * @ctx.  On failure the target is left on the context; the caller is
 * expected to destroy all targets of @ctx.
 */
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();

	if (!t)
		return -ENOMEM;
	damon_add_target(ctx, t);
	if (damon_target_has_pid(ctx)) {
		/* takes a reference on the pid; dropped with the target */
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			/* caller will destroy targets */
			return -EINVAL;
	}
	t->obsolete = sys_target->obsolete;
	return damon_sysfs_set_regions(t, sys_target->regions, ctx->min_sz_region);
}
1414
1415
/*
 * Add every sysfs-specified monitoring target to @ctx.  Stops and
 * returns the error of the first failing target.
 */
static int damon_sysfs_add_targets(struct damon_ctx *ctx,
		struct damon_sysfs_targets *sysfs_targets)
{
	int i, err;

	/* Multiple physical address space monitoring targets makes no sense */
	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
		return -EINVAL;

	for (i = 0; i < sysfs_targets->nr; i++) {
		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];

		err = damon_sysfs_add_target(st, ctx);
		if (err)
			return err;
	}
	return 0;
}
1433
1434
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @data: The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and update the
 * related values for sysfs files.  This function should be called from DAMON
 * worker thread, to safely access the DAMON contexts-internal data.  Caller
 * should also ensure holding ``damon_sysfs_lock``, and ->damon_ctx of @data is
 * not NULL but a valid pointer, to safely access DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damon_sysfs_schemes_update_stats(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
1453
1454
static inline bool damon_sysfs_kdamond_running(
1455
struct damon_sysfs_kdamond *kdamond)
1456
{
1457
return kdamond->damon_ctx &&
1458
damon_is_running(kdamond->damon_ctx);
1459
}
1460
1461
/*
 * Apply all user inputs of a sysfs context directory (ops, address unit,
 * monitoring attributes, targets and schemes) to a fresh @ctx.  Returns
 * the first error encountered, leaving @ctx partially configured; the
 * caller destroys it in that case.
 */
static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
		struct damon_sysfs_context *sys_ctx)
{
	int err;

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		return err;
	ctx->addr_unit = sys_ctx->addr_unit;
	/* addr_unit is respected by only DAMON_OPS_PADDR */
	if (sys_ctx->ops_id == DAMON_OPS_PADDR)
		ctx->min_sz_region = max(
				DAMON_MIN_REGION / sys_ctx->addr_unit, 1);
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		return err;
	err = damon_sysfs_add_targets(ctx, sys_ctx->targets);
	if (err)
		return err;
	return damon_sysfs_add_schemes(ctx, sys_ctx->schemes);
}
1482
1483
/* Forward declaration; defined below, needed by the commit path first. */
static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx);
1485
1486
/*
 * Return a new damon_ctx for testing new parameters to commit, cloned
 * from @running_ctx via damon_commit_ctx().  Returns NULL on failure.
 */
static struct damon_ctx *damon_sysfs_new_test_ctx(
		struct damon_ctx *running_ctx)
{
	struct damon_ctx *test_ctx;
	int err;

	test_ctx = damon_new_ctx();
	if (!test_ctx)
		return NULL;
	err = damon_commit_ctx(test_ctx, running_ctx);
	if (err) {
		damon_destroy_ctx(test_ctx);
		return NULL;
	}
	return test_ctx;
}
1505
1506
/*
 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
 * @data: The sysfs kdamond wrapper (struct damon_sysfs_kdamond *).
 *
 * Builds a context from the current sysfs inputs, dry-runs the commit
 * against a throw-away clone of the running context, and only then
 * commits to the real one.  Returns error if the sysfs input is wrong.
 */
static int damon_sysfs_commit_input(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *param_ctx, *test_ctx;
	int err;

	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(param_ctx))
		return PTR_ERR(param_ctx);
	test_ctx = damon_sysfs_new_test_ctx(kdamond->damon_ctx);
	if (!test_ctx)
		return -ENOMEM;
	/* validate the new parameters on the clone before touching the real ctx */
	err = damon_commit_ctx(test_ctx, param_ctx);
	if (err)
		goto out;
	err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
out:
	damon_destroy_ctx(test_ctx);
	damon_destroy_ctx(param_ctx);
	return err;
}
1539
1540
/*
 * Commit only the schemes' quota goals of a running kdamond, without a
 * full parameter commit.  @data is the sysfs kdamond wrapper.
 */
static int damon_sysfs_commit_schemes_quota_goals(void *data)
{
	struct damon_sysfs_kdamond *sysfs_kdamond = data;
	struct damon_ctx *ctx;
	struct damon_sysfs_context *sysfs_ctx;

	if (!damon_sysfs_kdamond_running(sysfs_kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (sysfs_kdamond->contexts->nr != 1)
		return -EINVAL;

	ctx = sysfs_kdamond->damon_ctx;
	sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
	return damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
}
1556
1557
/*
 * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
 * sysfs files.
 * @data: The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes' effective quotas of specific kdamond and
 * update the related values for sysfs files.  This function should be called
 * from DAMON callbacks while holding ``damon_sysfs_lock``, to safely access
 * the DAMON contexts-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_effective_quotas(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damos_sysfs_update_effective_quotas(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
1576
1577
static int damon_sysfs_upd_tuned_intervals(void *data)
1578
{
1579
struct damon_sysfs_kdamond *kdamond = data;
1580
struct damon_ctx *ctx = kdamond->damon_ctx;
1581
1582
kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us =
1583
ctx->attrs.sample_interval;
1584
kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us =
1585
ctx->attrs.aggr_interval;
1586
return 0;
1587
}
1588
1589
static struct damon_ctx *damon_sysfs_build_ctx(
1590
struct damon_sysfs_context *sys_ctx)
1591
{
1592
struct damon_ctx *ctx = damon_new_ctx();
1593
int err;
1594
1595
if (!ctx)
1596
return ERR_PTR(-ENOMEM);
1597
1598
err = damon_sysfs_apply_inputs(ctx, sys_ctx);
1599
if (err) {
1600
damon_destroy_ctx(ctx);
1601
return ERR_PTR(err);
1602
}
1603
1604
return ctx;
1605
}
1606
1607
/* Next time (in jiffies) the periodic refresh_ms update should run. */
static unsigned long damon_sysfs_next_update_jiffies;
1608
1609
/*
 * Repeating damon_call() callback that refreshes tuned intervals, scheme
 * stats and effective quotas every refresh_ms milliseconds.  Does nothing
 * when refresh_ms is zero, the period has not yet elapsed, or the global
 * sysfs lock is contended (skips this round rather than blocking).
 */
static int damon_sysfs_repeat_call_fn(void *data)
{
	struct damon_sysfs_kdamond *sysfs_kdamond = data;

	if (!sysfs_kdamond->refresh_ms)
		return 0;
	if (time_before(jiffies, damon_sysfs_next_update_jiffies))
		return 0;
	damon_sysfs_next_update_jiffies = jiffies +
		msecs_to_jiffies(sysfs_kdamond->refresh_ms);

	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	damon_sysfs_upd_tuned_intervals(sysfs_kdamond);
	damon_sysfs_upd_schemes_stats(sysfs_kdamond);
	damon_sysfs_upd_schemes_effective_quotas(sysfs_kdamond);
	mutex_unlock(&damon_sysfs_lock);
	return 0;
}
1628
1629
/*
 * Handle the "on" command: build a context from the sysfs inputs, start
 * a kdamond for it, and install the repeating refresh callback.  Any
 * leftover context from a previous run is destroyed first.
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	struct damon_call_control *repeat_call_control;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* free the context kept for showing the last run's results */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	repeat_call_control = kmalloc(sizeof(*repeat_call_control),
			GFP_KERNEL);
	if (!repeat_call_control)
		return -ENOMEM;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx)) {
		kfree(repeat_call_control);
		return PTR_ERR(ctx);
	}
	err = damon_start(&ctx, 1, false);
	if (err) {
		kfree(repeat_call_control);
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;

	damon_sysfs_next_update_jiffies =
		jiffies + msecs_to_jiffies(kdamond->refresh_ms);

	repeat_call_control->fn = damon_sysfs_repeat_call_fn;
	repeat_call_control->data = kdamond;
	repeat_call_control->repeat = true;
	/* control is heap-allocated; let DAMON free it when cancelled */
	repeat_call_control->dealloc_on_cancel = true;
	damon_call(ctx, repeat_call_control);
	return err;
}
1673
1674
/* Handle the "off" command: stop the running kdamond. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
	return damon_stop(&kdamond->damon_ctx, 1);
}
1685
1686
static int damon_sysfs_damon_call(int (*fn)(void *data),
1687
struct damon_sysfs_kdamond *kdamond)
1688
{
1689
struct damon_call_control call_control = {};
1690
int err;
1691
1692
if (!kdamond->damon_ctx)
1693
return -EINVAL;
1694
call_control.fn = fn;
1695
call_control.data = kdamond;
1696
err = damon_call(kdamond->damon_ctx, &call_control);
1697
return err ? err : call_control.return_code;
1698
}
1699
1700
/* Arguments passed through damos_walk() to the per-region callback. */
struct damon_sysfs_schemes_walk_data {
	struct damon_sysfs_kdamond *sysfs_kdamond;
	bool total_bytes_only;	/* update only tried_regions/total_bytes */
};
1704
1705
/* populate the region directory for one scheme-tried region */
static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s, unsigned long sz_filter_passed)
{
	struct damon_sysfs_schemes_walk_data *walk_data = data;
	struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond;

	damos_sysfs_populate_region_dir(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes,
			ctx, t, r, s, walk_data->total_bytes_only,
			sz_filter_passed);
}
1718
1719
/*
 * Rebuild the tried_regions sysfs subtrees by walking the regions that
 * each scheme has tried.  When @total_bytes_only, only the total_bytes
 * files are updated.  Previously populated region dirs are cleared first.
 */
static int damon_sysfs_update_schemes_tried_regions(
		struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only)
{
	struct damon_sysfs_schemes_walk_data walk_data = {
		.sysfs_kdamond = sysfs_kdamond,
		.total_bytes_only = total_bytes_only,
	};
	struct damos_walk_control control = {
		.walk_fn = damon_sysfs_schemes_tried_regions_upd_one,
		.data = &walk_data,
	};
	struct damon_ctx *ctx = sysfs_kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;

	damon_sysfs_schemes_clear_regions(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes);
	return damos_walk(ctx, &control);
}
1739
1740
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd: The command to handle.
 * @kdamond: The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  Most
 * commands are dispatched to the kdamond worker thread via
 * damon_sysfs_damon_call(); on/off and the tried-regions updates have
 * dedicated paths.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	case DAMON_SYSFS_CMD_COMMIT:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_input, kdamond);
	case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_schemes_quota_goals,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_stats, kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
		return damon_sysfs_update_schemes_tried_regions(kdamond, true);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_update_schemes_tried_regions(kdamond, false);
	case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_schemes_clear_regions(
				kdamond->contexts->contexts_arr[0]->schemes);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_effective_quotas,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_tuned_intervals, kdamond);
	default:
		return -EINVAL;
	}
}
1785
1786
/*
 * Handle writes to the "state" file: match the written keyword against
 * damon_sysfs_cmd_strs[] and run the corresponding command under the
 * global lock.  Unknown keywords yield -EINVAL; lock contention -EBUSY.
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	enum damon_sysfs_cmd cmd;
	ssize_t ret = -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
			ret = damon_sysfs_handle_cmd(cmd, kdamond);
			break;
		}
	}
	mutex_unlock(&damon_sysfs_lock);
	if (!ret)
		ret = count;
	return ret;
}
1807
1808
/*
 * Show the pid of the kdamond worker thread, or -1 if there is no
 * context or no running thread.  Takes damon_sysfs_lock (non-blocking)
 * and then ctx->kdamond_lock, in that order.
 */
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid = -1;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx)
		goto out;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}
1830
1831
static ssize_t refresh_ms_show(struct kobject *kobj,
1832
struct kobj_attribute *attr, char *buf)
1833
{
1834
struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1835
struct damon_sysfs_kdamond, kobj);
1836
1837
return sysfs_emit(buf, "%u\n", kdamond->refresh_ms);
1838
}
1839
1840
/* Store a new auto-refresh period in milliseconds; 0 disables it. */
static ssize_t refresh_ms_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *sysfs_kd;
	unsigned int ms;
	int err;

	err = kstrtouint(buf, 0, &ms);
	if (err)
		return err;

	sysfs_kd = container_of(kobj, struct damon_sysfs_kdamond, kobj);
	sysfs_kd->refresh_ms = ms;
	return count;
}
1854
1855
/*
 * kobject release callback: destroy any context kept for showing the
 * final results of a stopped kdamond, then free the wrapper.
 */
static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}
1864
1865
/* "state": root read/write; "pid": root read-only; "refresh_ms": root rw. */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct kobj_attribute damon_sysfs_kdamond_refresh_ms_attr =
		__ATTR_RW_MODE(refresh_ms, 0600);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	&damon_sysfs_kdamond_refresh_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

/* kobj_type for each numbered kdamond directory. */
static const struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
1887
1888
/*
1889
* kdamonds directory
1890
*/
1891
1892
/*
 * Wrapper for the "kdamonds" sysfs directory: the array of numbered
 * kdamond child directory wrappers and its length.
 */
struct damon_sysfs_kdamonds {
	struct kobject kobj;			/* the "kdamonds" directory */
	struct damon_sysfs_kdamond **kdamonds_arr;	/* numbered children */
	int nr;					/* number of valid entries above */
};
1897
1898
static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
1899
{
1900
return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
1901
}
1902
1903
/*
 * Remove all numbered kdamond child directories and free the pointer
 * array.  Safe to call when there are no children.
 */
static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
	int i;

	for (i = 0; i < kdamonds->nr; i++) {
		/* remove subtree first, then drop the child kobject */
		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
		kobject_put(&kdamonds_arr[i]->kobj);
	}
	kdamonds->nr = 0;
	kfree(kdamonds_arr);
	kdamonds->kdamonds_arr = NULL;
}
1916
1917
static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
1918
int nr_kdamonds)
1919
{
1920
int i;
1921
1922
for (i = 0; i < nr_kdamonds; i++) {
1923
if (damon_sysfs_kdamond_running(kdamonds[i]))
1924
return true;
1925
}
1926
1927
return false;
1928
}
1929
1930
/*
 * Resize the "kdamonds" directory to have @nr_kdamonds numbered child
 * directories.  Refuses (-EBUSY) while any existing kdamond is running.
 * On failure the directory is left empty.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/* the failing 'kdamond' is not in kdamonds_arr yet; put separately */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
1976
1977
static ssize_t nr_kdamonds_show(struct kobject *kobj,
1978
struct kobj_attribute *attr, char *buf)
1979
{
1980
struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
1981
struct damon_sysfs_kdamonds, kobj);
1982
1983
return sysfs_emit(buf, "%d\n", kdamonds->nr);
1984
}
1985
1986
/*
 * Handle writes to the "nr_kdamonds" file: (re)create that many kdamond
 * child directories under the global lock.  Returns -EBUSY instead of
 * blocking on lock contention.
 */
static ssize_t nr_kdamonds_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
2009
2010
static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2011
{
2012
kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2013
}
2014
2015
/* "nr_kdamonds" file: root-only read/write. */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

/* kobj_type for the "kdamonds" directory. */
static const struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
2029
2030
/*
2031
* damon user interface directory
2032
*/
2033
2034
/* Wrapper for the top-level user interface directory ("admin"). */
struct damon_sysfs_ui_dir {
	struct kobject kobj;			/* the "admin" directory */
	struct damon_sysfs_kdamonds *kdamonds;	/* "kdamonds" child directory */
};
2038
2039
static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2040
{
2041
return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2042
}
2043
2044
/*
 * Populate the UI directory with its "kdamonds" child directory.  On
 * failure the partially-initialized child is released and a negative
 * error code is returned.
 */
static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int err;

	kdamonds = damon_sysfs_kdamonds_alloc();
	if (!kdamonds)
		return -ENOMEM;

	err = kobject_init_and_add(&kdamonds->kobj,
			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
			"kdamonds");
	if (err) {
		kobject_put(&kdamonds->kobj);
		return err;
	}
	ui_dir->kdamonds = kdamonds;
	return err;
}
2063
2064
static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2065
{
2066
kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2067
}
2068
2069
/* The UI directory has no files of its own, only child directories. */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

/* kobj_type for the "admin" directory. */
static const struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
2079
2080
/*
 * Create /sys/kernel/mm/damon/admin/kdamonds/ at boot.  On any failure
 * all partially-created kobjects are released.
 */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
2110
2111
#include "tests/sysfs-kunit.h"
2112
2113