GitHub Repository: torvalds/linux
Path: blob/master/mm/damon/tests/core-kunit.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <[email protected]>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>

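/*
 * Check creation of a region, and addition/removal of the region to/from a
 * target.
 */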
static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}

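/* Count the monitoring targets that are currently linked to @ctx. */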
static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}

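/*
 * Check creation of a monitoring target, and addition/removal of the target
 * to/from a monitoring context.
 */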
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}

/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks accesses to each region and aggregates this information as the
 * access frequency of each region. In detail, it increases '->nr_accesses' of
 * each region in which an access has been confirmed.
 * 'kdamond_reset_aggregated()' flushes the aggregated information
 * ('->nr_accesses' of each region) to the result buffer. As a result of the
 * flushing, the '->nr_accesses' of each region is initialized to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}

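/*
 * Check that damon_split_region_at() splits a region at the given address and
 * that the access statistics are inherited by the newly created region.
 */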
static void damon_test_split_at(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);

	damon_free_target(t);
	damon_destroy_ctx(c);
}

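/*
 * Check that damon_merge_two_regions() extends the first region to cover the
 * second one, sets 'nr_accesses' to the size-weighted average of the two, and
 * removes the second region from the target.
 */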
static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}

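/* Return the @idx-th (zero-indexed) region of @t, or NULL if @t has fewer regions. */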
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}

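/*
 * Check that damon_merge_regions_of() merges only adjacent regions having
 * similar access frequencies, and keeps the resulting address ranges correct.
 */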
static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}

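/*
 * Check that damon_split_regions_of() does not create more regions than
 * requested.
 */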
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	r = damon_new_region(0, 22);
	damon_add_region(r, t);
	damon_split_regions_of(t, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	r = damon_new_region(0, 220);
	damon_add_region(r, t);
	damon_split_regions_of(t, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
	damon_destroy_ctx(c);
}

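/*
 * Check registration and selection of monitoring operations sets, including
 * rejection of double registration and of selecting an unknown ops id.
 */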
static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
			(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}

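/*
 * Check that damon_set_regions() makes the regions of a target cover exactly
 * the newly given address range, trimming existing regions and filling the
 * gaps between them.
 */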
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1 = damon_new_region(4, 16);
	struct damon_region *r2 = damon_new_region(24, 32);
	struct damon_addr_range range = {.start = 8, .end = 28};
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t, NULL);
}

static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases such as 32-bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
	 * damon_nr_accesses_to_accesses_bp() in that case would cause a
	 * divide-by-zero. Such a case is prohibited in normal execution since
	 * the caution is documented in the comment of the function, and
	 * damon_update_monitoring_results() does the check. Skip the test in
	 * that case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}

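/*
 * Check that damon_update_monitoring_result() rescales 'nr_accesses' and
 * 'age' of a region for new sampling/aggregation intervals.
 */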
static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}

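/*
 * Check that damon_set_attrs() accepts valid monitoring attributes and
 * rejects invalid combinations (too small min_nr_regions, max_nr_regions
 * smaller than min_nr_regions, and aggr_interval shorter than
 * sample_interval).
 */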
static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}

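/*
 * Check damon_moving_sum() outputs against pre-computed expected values for a
 * fixed series of inputs.
 */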
static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
		45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}

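/*
 * Check that damos_new_filter() sets the type and matching fields and
 * initializes the list head of the new filter.
 */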
static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}

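/*
 * Check DAMOS address range filter matching against regions that are fully
 * inside, fully outside, or partially overlapping the filter's range, and
 * that partially overlapping regions get split at the range boundary.
 */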
static void damos_test_filter_out(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2;
	struct damos_filter *f;

	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
	f->addr_range = (struct damon_addr_range){
		.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};

	t = damon_new_target();
	r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
	damon_add_region(r, t);

	/* region in the range */
	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 2;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region after the range */
	r->ar.start = DAMON_MIN_REGION * 6;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region started before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 4;
	KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
	damon_destroy_region(r2, t);

	/* region started in the range */
	r->ar.start = DAMON_MIN_REGION * 2;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
	damon_destroy_region(r2, t);

	damon_free_target(t);
	damos_free_filter(f);
}

static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long last_input = 900000, current_score = 200;

	/*
	 * If the current score is lower than the goal, which is always 10,000
	 * (see the comment of damon_feed_loop_next_input()), the next input
	 * should be higher than the last input.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * If the current score is higher than the goal, the next input should
	 * be lower than the last input.
	 */
	current_score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * The next input depends on the distance between the current score
	 * and the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, 200),
			damon_feed_loop_next_input(last_input, 2000));
}

static void damon_test_set_filters_default_reject(struct kunit *test)
{
	struct damos scheme;
	struct damos_filter *target_filter, *anon_filter;

	INIT_LIST_HEAD(&scheme.filters);
	INIT_LIST_HEAD(&scheme.ops_filters);

	damos_set_filters_default_reject(&scheme);
	/*
	 * No filter is installed. Allow by default on both core and ops layer
	 * filtering stages, since there are no filters at all.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
	damos_add_filter(&scheme, target_filter);
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter is installed.
	 * Reject by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter->allow = false;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter is installed.
	 * Allow by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
	damos_add_filter(&scheme, anon_filter);

	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	target_filter->allow = true;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	KUNIT_CASE(damon_test_set_filters_default_reject),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif /* _DAMON_CORE_TEST_H */

#endif /* CONFIG_DAMON_KUNIT_TEST */