/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>

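/*
 * Test the basic lifecycle of a region: allocation, addition to a target,
 * destruction, and the resulting region counts.
 */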
static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	if (!r)
		kunit_skip(test, "region alloc fail");
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	if (!t) {
		damon_free_region(r);
		kunit_skip(test, "target alloc fail");
	}
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}

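/* Return the number of monitoring targets installed on @ctx. */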
static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}

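/*
 * Test addition and destruction of monitoring targets on a monitoring
 * context.
 */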
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	t = damon_new_target();
	if (!t) {
		damon_destroy_ctx(c);
		kunit_skip(test, "target alloc fail");
	}
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}

/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks accesses to each region and aggregates this information as the
 * access frequency of each region. In detail, it increases '->nr_accesses' of
 * each region in which an access has been detected.
 * 'kdamond_reset_aggregated()' flushes the aggregated information
 * ('->nr_accesses' of each region) to the result buffer. As a result of the
 * flushing, the '->nr_accesses' of each region is reset to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	if (!ctx)
		kunit_skip(test, "ctx alloc fail");

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		if (!t) {
			damon_destroy_ctx(ctx);
			kunit_skip(test, "target alloc fail");
		}
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			if (!r) {
				damon_destroy_ctx(ctx);
				kunit_skip(test, "region alloc fail");
			}
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}

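/*
 * Test damon_split_region_at().
 *
 * The split should preserve the access/age statistics of the original region
 * in both resulting regions.
 */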
static void damon_test_split_at(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	r->age = 10;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
	KUNIT_EXPECT_EQ(test, r->age, r_new->age);

	damon_free_target(t);
}

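/*
 * Test damon_merge_two_regions().
 *
 * The merged region should cover both original ranges, and its statistics
 * should be the size-weighted averages of the originals, e.g.,
 * nr_accesses = (10 * 100 + 20 * 200) / 300 = 16 and
 * age = (9 * 100 + 21 * 200) / 300 = 17 for the regions below.
 */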
static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	r->age = 9;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	if (!r2) {
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	r2->age = 21;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, 160000u);
	KUNIT_EXPECT_EQ(test, r->age, 17u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}

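/* Return the @idx'th region of @t, or NULL if there is no such region. */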
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}

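/*
 * Test damon_merge_regions_of().
 *
 * Adjacent regions whose access frequencies differ by not more than the
 * given threshold should be merged, as long as the merged region does not
 * exceed the given size limit.
 */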
static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184, 230};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230, 10170};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2, 5};

	unsigned long saddrs[] = {0, 114, 130, 156, 170, 230};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230, 10170};
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		if (!r) {
			damon_free_target(t);
			kunit_skip(test, "region alloc fail");
		}
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230, 230-10170 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 6u);
	for (i = 0; i < 6; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}

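/*
 * Test damon_split_regions_of().
 *
 * Splitting should not produce more than the requested number of sub-regions
 * per region, and the resulting region sizes should be multiples of the
 * given granularity.
 */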
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 300, 500};
	unsigned long ea[] = {220, 400, 700};
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 22);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	damon_add_region(r, t);
	damon_split_regions_of(t, 2, 1);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "second target alloc fail");
	r = damon_new_region(0, 220);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	damon_add_region(r, t);
	damon_split_regions_of(t, 4, 1);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "third target alloc fail");
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		if (!r) {
			damon_free_target(t);
			kunit_skip(test, "region alloc fail");
		}
		damon_add_region(r, t);
	}
	damon_split_regions_of(t, 4, 5);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 12u);
	damon_for_each_region(r, t)
		KUNIT_EXPECT_EQ(test, damon_sz_region(r) % 5ul, 0ul);
	damon_free_target(t);
}

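/*
 * Test the DAMON operations set registration and selection interfaces.
 */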
static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
				(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}

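/*
 * Test damon_set_regions().
 *
 * Existing regions should be resized and new ones added so that the target's
 * regions exactly cover the requested address ranges.
 */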
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1, *r2;
	struct damon_addr_range range = {.start = 8, .end = 28};
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	if (!t)
		kunit_skip(test, "target alloc fail");
	r1 = damon_new_region(4, 16);
	if (!r1) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r2 = damon_new_region(24, 32);
	if (!r2) {
		damon_free_target(t);
		damon_free_region(r1);
		kunit_skip(test, "second region alloc fail");
	}

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1, 1);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t, NULL);
}

static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases such as 32-bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
	 * damon_nr_accesses_to_accesses_bp() in that case would cause a
	 * divide-by-zero. Such a case is prohibited in normal execution,
	 * since the caveat is documented in the comment of the function and
	 * damon_update_monitoring_results() does the check. Skip the test in
	 * that case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}

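/*
 * Test damon_update_monitoring_result().
 *
 * 'nr_accesses' should be rescaled only when the new attributes change the
 * maximum number of samples per aggregation (aggr_interval /
 * sample_interval), and 'age' should be rescaled only when the aggregation
 * interval changes, e.g., 20 aggregation intervals of 1,000 us become 2
 * intervals of 10,000 us.
 */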
static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	if (!r)
		kunit_skip(test, "region alloc fail");

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}

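/*
 * Test damon_set_attrs().
 *
 * A too-small min_nr_regions, a max_nr_regions smaller than min_nr_regions,
 * and an aggr_interval shorter than the sample_interval should all be
 * rejected.
 */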
static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}

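/*
 * Test damon_moving_sum().
 *
 * The expected values follow the moving sum formula: the new sum is the old
 * sum minus nomvsum / len_window, plus the new value, e.g.,
 * 50000 - 50000 / 10 + 10000 = 55000 for the first round below.
 */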
static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
		45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}

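/*
 * Test damos_new_filter().
 *
 * A new filter should be initialized with the given type and matching
 * behavior, and its list head should point to itself.
 */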
static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	if (!filter)
		kunit_skip(test, "filter alloc fail");
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}

static void damos_test_commit_quota_goal_for(struct kunit *test,
		struct damos_quota_goal *dst,
		struct damos_quota_goal *src)
{
	u64 dst_last_psi_total = 0;

	if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		dst_last_psi_total = dst->last_psi_total;
	damos_commit_quota_goal(dst, src);

	KUNIT_EXPECT_EQ(test, dst->metric, src->metric);
	KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value);
	if (src->metric == DAMOS_QUOTA_USER_INPUT)
		KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value);
	if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total);
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	default:
		break;
	}
}

static void damos_test_commit_quota_goal(struct kunit *test)
{
	struct damos_quota_goal dst = {
		.metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
		.target_value = 1000,
		.current_value = 123,
		.last_psi_total = 456,
	};

	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_USER_INPUT,
			.target_value = 789,
			.current_value = 12});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
			.target_value = 345,
			.current_value = 678,
			.nid = 9,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_USED_BP,
			.target_value = 12,
			.current_value = 345,
			.nid = 6,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
			.target_value = 456,
			.current_value = 567,
			.nid = 6,
			.memcg_id = 7,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
			.target_value = 890,
			.current_value = 901,
			.nid = 10,
			.memcg_id = 1,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
			.target_value = 234,
			.current_value = 345,
			.last_psi_total = 567,
			});
}

static void damos_test_commit_quota_goals_for(struct kunit *test,
		struct damos_quota_goal *dst_goals, int nr_dst_goals,
		struct damos_quota_goal *src_goals, int nr_src_goals)
{
	struct damos_quota dst, src;
	struct damos_quota_goal *goal, *next;
	bool skip = true;
	int i;

	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	for (i = 0; i < nr_dst_goals; i++) {
		/*
		 * When nr_src_goals is smaller than nr_dst_goals,
		 * damos_commit_quota_goals() will kfree() the dst goals.
		 * Make them kfree()-able.
		 */
		goal = damos_new_quota_goal(dst_goals[i].metric,
				dst_goals[i].target_value);
		if (!goal)
			goto out;
		damos_add_quota_goal(&dst, goal);
	}
	skip = false;
	for (i = 0; i < nr_src_goals; i++)
		damos_add_quota_goal(&src, &src_goals[i]);

	damos_commit_quota_goals(&dst, &src);

	i = 0;
	damos_for_each_quota_goal(goal, &dst) {
		KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric);
		KUNIT_EXPECT_EQ(test, goal->target_value,
				src_goals[i++].target_value);
	}
	KUNIT_EXPECT_EQ(test, i, nr_src_goals);

out:
	damos_for_each_quota_goal_safe(goal, next, &dst)
		damos_destroy_quota_goal(goal);
	if (skip)
		kunit_skip(test, "goal alloc fail");
}

static void damos_test_commit_quota_goals(struct kunit *test)
{
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){}, 0,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 123,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 234,
			},
			}, 1,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 345,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 456,
			},
			}, 1,
			(struct damos_quota_goal[]){}, 0);
}

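/*
 * Test damos_commit_quota().
 *
 * All scalar fields of the destination quota should be overwritten with the
 * source's values.
 */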
static void damos_test_commit_quota(struct kunit *test)
{
	struct damos_quota dst = {
		.reset_interval = 1,
		.ms = 2,
		.sz = 3,
		.weight_sz = 4,
		.weight_nr_accesses = 5,
		.weight_age = 6,
	};
	struct damos_quota src = {
		.reset_interval = 7,
		.ms = 8,
		.sz = 9,
		.weight_sz = 10,
		.weight_nr_accesses = 11,
		.weight_age = 12,
	};

	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	damos_commit_quota(&dst, &src);

	KUNIT_EXPECT_EQ(test, dst.reset_interval, src.reset_interval);
	KUNIT_EXPECT_EQ(test, dst.ms, src.ms);
	KUNIT_EXPECT_EQ(test, dst.sz, src.sz);
	KUNIT_EXPECT_EQ(test, dst.weight_sz, src.weight_sz);
	KUNIT_EXPECT_EQ(test, dst.weight_nr_accesses, src.weight_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst.weight_age, src.weight_age);
}

static int damos_test_help_dests_setup(struct damos_migrate_dests *dests,
		unsigned int *node_id_arr, unsigned int *weight_arr,
		size_t nr_dests)
{
	size_t i;

	dests->node_id_arr = kmalloc_array(nr_dests,
			sizeof(*dests->node_id_arr), GFP_KERNEL);
	if (!dests->node_id_arr)
		return -ENOMEM;
	dests->weight_arr = kmalloc_array(nr_dests,
			sizeof(*dests->weight_arr), GFP_KERNEL);
	if (!dests->weight_arr) {
		kfree(dests->node_id_arr);
		dests->node_id_arr = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_dests; i++) {
		dests->node_id_arr[i] = node_id_arr[i];
		dests->weight_arr[i] = weight_arr[i];
	}
	dests->nr_dests = nr_dests;
	return 0;
}

static void damos_test_help_dests_free(struct damos_migrate_dests *dests)
{
	kfree(dests->node_id_arr);
	kfree(dests->weight_arr);
}

static void damos_test_commit_dests_for(struct kunit *test,
		unsigned int *dst_node_id_arr, unsigned int *dst_weight_arr,
		size_t dst_nr_dests,
		unsigned int *src_node_id_arr, unsigned int *src_weight_arr,
		size_t src_nr_dests)
{
	struct damos_migrate_dests dst = {}, src = {};
	int i, err;
	bool skip = true;

	err = damos_test_help_dests_setup(&dst, dst_node_id_arr,
			dst_weight_arr, dst_nr_dests);
	if (err)
		kunit_skip(test, "dests setup fail");
	err = damos_test_help_dests_setup(&src, src_node_id_arr,
			src_weight_arr, src_nr_dests);
	if (err) {
		damos_test_help_dests_free(&dst);
		kunit_skip(test, "src setup fail");
	}
	err = damos_commit_dests(&dst, &src);
	if (err)
		goto out;
	skip = false;

	KUNIT_EXPECT_EQ(test, dst.nr_dests, src_nr_dests);
	for (i = 0; i < dst.nr_dests; i++) {
		KUNIT_EXPECT_EQ(test, dst.node_id_arr[i], src_node_id_arr[i]);
		KUNIT_EXPECT_EQ(test, dst.weight_arr[i], src_weight_arr[i]);
	}

out:
	damos_test_help_dests_free(&dst);
	damos_test_help_dests_free(&src);
	if (skip)
		kunit_skip(test, "dests commit fail");
}

static void damos_test_commit_dests(struct kunit *test)
{
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
			3,
			(unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
			3);
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2}, (unsigned int[]){2, 3},
			2,
			(unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
			3);
	damos_test_commit_dests_for(test,
			NULL, NULL, 0,
			(unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
			3);
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
			3,
			(unsigned int[]){4, 5}, (unsigned int[]){5, 6}, 2);
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
			3,
			NULL, NULL, 0);
}

static void damos_test_commit_filter_for(struct kunit *test,
		struct damos_filter *dst, struct damos_filter *src)
{
	damos_commit_filter(dst, src);
	KUNIT_EXPECT_EQ(test, dst->type, src->type);
	KUNIT_EXPECT_EQ(test, dst->matching, src->matching);
	KUNIT_EXPECT_EQ(test, dst->allow, src->allow);
	switch (src->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		KUNIT_EXPECT_EQ(test, dst->addr_range.start,
				src->addr_range.start);
		KUNIT_EXPECT_EQ(test, dst->addr_range.end,
				src->addr_range.end);
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx);
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min);
		KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max);
		break;
	default:
		break;
	}
}

static void damos_test_commit_filter(struct kunit *test)
{
	struct damos_filter dst = {
		.type = DAMOS_FILTER_TYPE_ACTIVE,
		.matching = false,
		.allow = false,
	};

	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ANON,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_MEMCG,
			.matching = false,
			.allow = false,
			.memcg_id = 123,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_YOUNG,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
			.matching = false,
			.allow = false,
			.sz_range = {.min = 234, .max = 345},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_UNMAPPED,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ADDR,
			.matching = false,
			.allow = false,
			.addr_range = {.start = 456, .end = 567},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_TARGET,
			.matching = true,
			.allow = true,
			.target_idx = 6,
			});
}

static void damos_test_help_initialize_scheme(struct damos *scheme)
{
	INIT_LIST_HEAD(&scheme->quota.goals);
	INIT_LIST_HEAD(&scheme->core_filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
}

static void damos_test_commit_for(struct kunit *test, struct damos *dst,
		struct damos *src)
{
	int err;

	damos_test_help_initialize_scheme(dst);
	damos_test_help_initialize_scheme(src);

	err = damos_commit(dst, src);
	if (err)
		kunit_skip(test, "damos_commit fail");

	KUNIT_EXPECT_EQ(test, dst->pattern.min_sz_region,
			src->pattern.min_sz_region);
	KUNIT_EXPECT_EQ(test, dst->pattern.max_sz_region,
			src->pattern.max_sz_region);
	KUNIT_EXPECT_EQ(test, dst->pattern.min_nr_accesses,
			src->pattern.min_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst->pattern.max_nr_accesses,
			src->pattern.max_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst->pattern.min_age_region,
			src->pattern.min_age_region);
	KUNIT_EXPECT_EQ(test, dst->pattern.max_age_region,
			src->pattern.max_age_region);

	KUNIT_EXPECT_EQ(test, dst->action, src->action);
	KUNIT_EXPECT_EQ(test, dst->apply_interval_us, src->apply_interval_us);

	KUNIT_EXPECT_EQ(test, dst->wmarks.metric, src->wmarks.metric);
	KUNIT_EXPECT_EQ(test, dst->wmarks.interval, src->wmarks.interval);
	KUNIT_EXPECT_EQ(test, dst->wmarks.high, src->wmarks.high);
	KUNIT_EXPECT_EQ(test, dst->wmarks.mid, src->wmarks.mid);
	KUNIT_EXPECT_EQ(test, dst->wmarks.low, src->wmarks.low);

	switch (src->action) {
	case DAMOS_MIGRATE_COLD:
	case DAMOS_MIGRATE_HOT:
		KUNIT_EXPECT_EQ(test, dst->target_nid, src->target_nid);
		break;
	default:
		break;
	}
}

static void damos_test_commit_pageout(struct kunit *test)
{
	damos_test_commit_for(test,
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				1, 2, 3, 4, 5, 6},
			.action = DAMOS_PAGEOUT,
			.apply_interval_us = 1000000,
			.wmarks = (struct damos_watermarks){
				DAMOS_WMARK_FREE_MEM_RATE,
				900, 100, 50},
			},
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				2, 3, 4, 5, 6, 7},
			.action = DAMOS_PAGEOUT,
			.apply_interval_us = 2000000,
			.wmarks = (struct damos_watermarks){
				DAMOS_WMARK_FREE_MEM_RATE,
				800, 50, 30},
			});
}

static void damos_test_commit_migrate_hot(struct kunit *test)
{
	damos_test_commit_for(test,
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				1, 2, 3, 4, 5, 6},
			.action = DAMOS_PAGEOUT,
			.apply_interval_us = 1000000,
			.wmarks = (struct damos_watermarks){
				DAMOS_WMARK_FREE_MEM_RATE,
				900, 100, 50},
			},
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				2, 3, 4, 5, 6, 7},
			.action = DAMOS_MIGRATE_HOT,
			.apply_interval_us = 2000000,
			.target_nid = 5,
			});
}

static struct damon_target *damon_test_help_setup_target(
		unsigned long region_start_end[][2], int nr_regions)
{
	struct damon_target *t;
	struct damon_region *r;
	int i;

	t = damon_new_target();
	if (!t)
		return NULL;
	for (i = 0; i < nr_regions; i++) {
		r = damon_new_region(region_start_end[i][0],
				region_start_end[i][1]);
		if (!r) {
			damon_free_target(t);
			return NULL;
		}
		damon_add_region(r, t);
	}
	return t;
}

static void damon_test_commit_target_regions_for(struct kunit *test,
		unsigned long dst_start_end[][2], int nr_dst_regions,
		unsigned long src_start_end[][2], int nr_src_regions,
		unsigned long expect_start_end[][2], int nr_expect_regions)
{
	struct damon_target *dst_target, *src_target;
	struct damon_region *r;
	int i;

	dst_target = damon_test_help_setup_target(dst_start_end, nr_dst_regions);
	if (!dst_target)
		kunit_skip(test, "dst target setup fail");
	src_target = damon_test_help_setup_target(src_start_end, nr_src_regions);
	if (!src_target) {
		damon_free_target(dst_target);
		kunit_skip(test, "src target setup fail");
	}
	damon_commit_target_regions(dst_target, src_target, 1);
	i = 0;
	damon_for_each_region(r, dst_target) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expect_start_end[i][0]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expect_start_end[i][1]);
		i++;
	}
	KUNIT_EXPECT_EQ(test, damon_nr_regions(dst_target), nr_expect_regions);
	KUNIT_EXPECT_EQ(test, i, nr_expect_regions);
	damon_free_target(dst_target);
	damon_free_target(src_target);
}

static void damon_test_commit_target_regions(struct kunit *test)
{
	damon_test_commit_target_regions_for(test,
			(unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
			(unsigned long[][2]) {{4, 6}}, 1,
			(unsigned long[][2]) {{4, 6}}, 1);
	damon_test_commit_target_regions_for(test,
			(unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
			(unsigned long[][2]) {}, 0,
			(unsigned long[][2]) {{3, 8}, {8, 10}}, 2);
}

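/*
 * Test DAMOS address range filter matching.
 *
 * A region partially overlapping the filter's address range should be split
 * at the boundary, so that matching and non-matching parts can be handled
 * separately.
 */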
static void damos_test_filter_out(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2;
	struct damos_filter *f;

	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
	if (!f)
		kunit_skip(test, "filter alloc fail");
	f->addr_range = (struct damon_addr_range){.start = 2, .end = 6};

	t = damon_new_target();
	if (!t) {
		damos_destroy_filter(f);
		kunit_skip(test, "target alloc fail");
	}
	r = damon_new_region(3, 5);
	if (!r) {
		damos_destroy_filter(f);
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	damon_add_region(r, t);

	/* region in the range */
	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f, 1));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region before the range */
	r->ar.start = 1;
	r->ar.end = 2;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, 1));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region after the range */
	r->ar.start = 6;
	r->ar.end = 8;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, 1));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region started before the range */
	r->ar.start = 1;
	r->ar.end = 4;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f, 1));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, 1);
	KUNIT_EXPECT_EQ(test, r->ar.end, 2);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, 2);
	KUNIT_EXPECT_EQ(test, r2->ar.end, 4);
	damon_destroy_region(r2, t);

	/* region started in the range */
	r->ar.start = 2;
	r->ar.end = 8;
	KUNIT_EXPECT_TRUE(test,
			damos_filter_match(NULL, t, r, f, 1));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, 2);
	KUNIT_EXPECT_EQ(test, r->ar.end, 6);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, 6);
	KUNIT_EXPECT_EQ(test, r2->ar.end, 8);
	damon_destroy_region(r2, t);

	damon_free_target(t);
	damos_free_filter(f);
}

static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long last_input = 900000, current_score = 200;

	/*
	 * If the current score is lower than the goal, which is always
	 * 10,000 (see the comment on damon_feed_loop_next_input()), the next
	 * input should be higher than the last input.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * If the current score is higher than the goal, the next input
	 * should be lower than the last input.
	 */
	current_score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * The next input depends on the distance between the current score
	 * and the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, 200),
			damon_feed_loop_next_input(last_input, 2000));
}

static void damon_test_set_filters_default_reject(struct kunit *test)
{
	struct damos scheme;
	struct damos_filter *target_filter, *anon_filter;

	INIT_LIST_HEAD(&scheme.core_filters);
	INIT_LIST_HEAD(&scheme.ops_filters);

	damos_set_filters_default_reject(&scheme);
	/*
	 * No filter is installed. Allow by default on both core and ops layer
	 * filtering stages, since there are no filters at all.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
	if (!target_filter)
		kunit_skip(test, "filter alloc fail");
	damos_add_filter(&scheme, target_filter);
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter is installed.
	 * Reject by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter->allow = false;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter is installed.
	 * Allow by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
	if (!anon_filter) {
		damos_free_filter(target_filter);
		kunit_skip(test, "anon_filter alloc fail");
	}
	damos_add_filter(&scheme, anon_filter);

	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter and ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	target_filter->allow = true;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter and ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	damos_free_filter(anon_filter);
	damos_free_filter(target_filter);
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_commit_quota_goal),
	KUNIT_CASE(damos_test_commit_quota_goals),
	KUNIT_CASE(damos_test_commit_quota),
	KUNIT_CASE(damos_test_commit_dests),
	KUNIT_CASE(damos_test_commit_filter),
	KUNIT_CASE(damos_test_commit_pageout),
	KUNIT_CASE(damos_test_commit_migrate_hot),
	KUNIT_CASE(damon_test_commit_target_regions),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	KUNIT_CASE(damon_test_set_filters_default_reject),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif /* _DAMON_CORE_TEST_H */

#endif /* CONFIG_DAMON_KUNIT_TEST */