GitHub Repository: torvalds/linux
Path: blob/master/drivers/android/tests/binder_alloc_kunit.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for binder allocator code.
 *
 * Copyright 2025 Google LLC.
 * Author: Tiffany Yang <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <kunit/test.h>
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/seq_buf.h>
#include <linux/sizes.h>

#include "../binder_alloc.h"
#include "../binder_internal.h"

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define BINDER_MMAP_SIZE SZ_128K

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)

#define ALIGN_TYPE_STRLEN (12)

#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)

#define PRINT_ALL_CASES (0)

/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)
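
/*
 * Worked out: each stringified alignment entry "[ %d:%s ]" takes
 * ALIGN_TYPE_STRLEN + 6 == 18 bytes, so ALIGNMENTS_BUFLEN is
 * 18 * 5 + 1 == 91 bytes including the NUL; FREESEQ_BUFLEN is
 * 3 * 5 + 1 == 16 bytes for five "[n]" entries. The exhaustive run
 * covers 5^5 * 2 * 5! == 750000 cases in total.
 */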

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to previous buffer by addr.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
        /**
         * @SAME_PAGE_UNALIGNED: The end of this buffer is on
         * the same page as the end of the previous buffer and
         * is not page aligned. Examples:
         * buf1 ][ buf2 ][ ...
         * buf1 ]|[ buf2 ][ ...
         */
        SAME_PAGE_UNALIGNED = 0,
        /**
         * @SAME_PAGE_ALIGNED: When the end of the previous buffer
         * is not page aligned, the end of this buffer is on the
         * same page as the end of the previous buffer and is page
         * aligned. When the previous buffer is page aligned, the
         * end of this buffer is aligned to the next page boundary.
         * Examples:
         * buf1 ][ buf2 ]| ...
         * buf1 ]|[ buf2 ]| ...
         */
        SAME_PAGE_ALIGNED,
        /**
         * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
         * the page next to the end of the previous buffer and
         * is not page aligned. Examples:
         * buf1 ][ buf2 | buf2 ][ ...
         * buf1 ]|[ buf2 | buf2 ][ ...
         */
        NEXT_PAGE_UNALIGNED,
        /**
         * @NEXT_PAGE_ALIGNED: The end of this buffer is on
         * the page next to the end of the previous buffer and
         * is page aligned. Examples:
         * buf1 ][ buf2 | buf2 ]| ...
         * buf1 ]|[ buf2 | buf2 ]| ...
         */
        NEXT_PAGE_ALIGNED,
        /**
         * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
         * the page that follows the page after the end of the
         * previous buffer and is not page aligned. Examples:
         * buf1 ][ buf2 | buf2 | buf2 ][ ...
         * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
         */
        NEXT_NEXT_UNALIGNED,
        /**
         * @LOOP_END: The number of enum values in &buf_end_align_type.
         * It is used for controlling loop termination.
         */
        LOOP_END,
};
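
/*
 * Note: the *_ALIGNED cases are deliberately the odd enum values;
 * gen_buf_offsets() relies on this (align % 2) to decide whether to
 * round a buffer end up to the next page boundary.
 */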

static const char *const buf_end_align_type_strs[LOOP_END] = {
        [SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
        [SAME_PAGE_ALIGNED] = " SP_ALIGNED ",
        [NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
        [NEXT_PAGE_ALIGNED] = " NP_ALIGNED ",
        [NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
};

struct binder_alloc_test_case_info {
        char alignments[ALIGNMENTS_BUFLEN];
        struct seq_buf alignments_sb;
        size_t *buffer_sizes;
        int *free_sequence;
        bool front_pages;
};

static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *sb)
{
        int i;

        for (i = 0; i < BUFFER_NUM; i++)
                seq_buf_printf(sb, "[%d]", seq[i]);

        KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
}

static void stringify_alignments(struct kunit *test, int *alignments,
                                 struct seq_buf *sb)
{
        int i;

        for (i = 0; i < BUFFER_NUM; i++)
                seq_buf_printf(sb, "[ %d:%s ]", i,
                               buf_end_align_type_strs[alignments[i]]);

        KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
}

static bool check_buffer_pages_allocated(struct kunit *test,
                                         struct binder_alloc *alloc,
                                         struct binder_buffer *buffer,
                                         size_t size)
{
        unsigned long page_addr;
        unsigned long end;
        int page_index;

        end = PAGE_ALIGN(buffer->user_data + size);
        page_addr = buffer->user_data;
        for (; page_addr < end; page_addr += PAGE_SIZE) {
                page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
                if (!alloc->pages[page_index] ||
                    !list_empty(page_to_lru(alloc->pages[page_index]))) {
                        kunit_err(test, "expect alloc but is %s at page index %d\n",
                                  alloc->pages[page_index] ?
                                  "lru" : "free", page_index);
                        return false;
                }
        }
        return true;
}
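
/*
 * A page in alloc->pages can be in one of three states: NULL (never
 * installed or already reclaimed), non-NULL but on the freelist lru
 * (cached, reclaimable), or non-NULL and off the lru (backing a live
 * buffer). The checks above accept only the last state.
 */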

static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
                                                 struct binder_alloc *alloc,
                                                 struct binder_buffer *buffers[],
                                                 size_t *sizes, int *seq)
{
        unsigned long failures = 0;
        int i;

        for (i = 0; i < BUFFER_NUM; i++) {
                buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
                if (IS_ERR(buffers[i]) ||
                    !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
                        failures++;
        }

        return failures;
}

static unsigned long binder_alloc_test_free_buf(struct kunit *test,
                                                struct binder_alloc *alloc,
                                                struct binder_buffer *buffers[],
                                                size_t *sizes, int *seq, size_t end)
{
        unsigned long failures = 0;
        int i;

        for (i = 0; i < BUFFER_NUM; i++)
                binder_alloc_free_buf(alloc, buffers[seq[i]]);

        for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
                if (list_empty(page_to_lru(alloc->pages[i]))) {
                        kunit_err(test, "expect lru but is %s at page index %d\n",
                                  alloc->pages[i] ? "alloc" : "free", i);
                        failures++;
                }
        }

        return failures;
}
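
/*
 * Freeing in a caller-chosen order (seq) exercises the allocator's
 * coalescing of adjacent free buffers; afterwards every page up to
 * "end" must sit on the freelist lru rather than being returned to
 * the system immediately.
 */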

static unsigned long binder_alloc_test_free_page(struct kunit *test,
                                                 struct binder_alloc *alloc)
{
        unsigned long failures = 0;
        unsigned long count;
        int i;

        while ((count = list_lru_count(alloc->freelist))) {
                list_lru_walk(alloc->freelist, binder_alloc_free_page,
                              NULL, count);
        }

        for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
                if (alloc->pages[i]) {
                        kunit_err(test, "expect free but is %s at page index %d\n",
                                  list_empty(page_to_lru(alloc->pages[i])) ?
                                  "alloc" : "lru", i);
                        failures++;
                }
        }

        return failures;
}
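
/*
 * This walks the freelist with binder_alloc_free_page() the same way
 * the binder shrinker reclaims cached pages under memory pressure, so
 * the test covers the reclaim path without needing actual pressure.
 */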

/* Executes one full test run for the given test case. */
static bool binder_alloc_test_alloc_free(struct kunit *test,
                                         struct binder_alloc *alloc,
                                         struct binder_alloc_test_case_info *tc,
                                         size_t end)
{
        unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
        struct binder_buffer *buffers[BUFFER_NUM];
        unsigned long failures;
        bool failed = false;

        failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
                                               tc->buffer_sizes,
                                               tc->free_sequence);
        failed = failed || failures;
        KUNIT_EXPECT_EQ_MSG(test, failures, 0,
                            "Initial allocation failed: %lu/%u buffers with errors",
                            failures, BUFFER_NUM);

        failures = binder_alloc_test_free_buf(test, alloc, buffers,
                                              tc->buffer_sizes,
                                              tc->free_sequence, end);
        failed = failed || failures;
        KUNIT_EXPECT_EQ_MSG(test, failures, 0,
                            "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
                            failures, pages);

        /* Allocate from lru. */
        failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
                                               tc->buffer_sizes,
                                               tc->free_sequence);
        failed = failed || failures;
        KUNIT_EXPECT_EQ_MSG(test, failures, 0,
                            "Reallocation failed: %lu/%u buffers with errors",
                            failures, BUFFER_NUM);

        failures = list_lru_count(alloc->freelist);
        failed = failed || failures;
        KUNIT_EXPECT_EQ_MSG(test, failures, 0,
                            "lru list should be empty after reallocation but still has %lu pages",
                            failures);

        failures = binder_alloc_test_free_buf(test, alloc, buffers,
                                              tc->buffer_sizes,
                                              tc->free_sequence, end);
        failed = failed || failures;
        KUNIT_EXPECT_EQ_MSG(test, failures, 0,
                            "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
                            failures, pages);

        failures = binder_alloc_test_free_page(test, alloc);
        failed = failed || failures;
        KUNIT_EXPECT_EQ_MSG(test, failures, 0,
                            "Failed to clean up allocated pages: %lu/%lu pages still installed",
                            failures, (alloc->buffer_size / PAGE_SIZE));

        return failed;
}
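
/*
 * Each run therefore checks five phases in order: fresh allocation,
 * free onto the lru, reallocation that must reuse every cached page
 * (leaving the lru empty), a second free, and full page reclaim.
 */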

static bool is_dup(int *seq, int index, int val)
{
        int i;

        for (i = 0; i < index; i++) {
                if (seq[i] == val)
                        return true;
        }
        return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
                          struct binder_alloc_test_case_info *tc,
                          unsigned long *runs, unsigned long *failures,
                          int index, size_t end)
{
        bool case_failed;
        int i;

        if (index == BUFFER_NUM) {
                DECLARE_SEQ_BUF(freeseq_sb, FREESEQ_BUFLEN);

                case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
                *runs += 1;
                *failures += case_failed;

                if (case_failed || PRINT_ALL_CASES) {
                        stringify_free_seq(test, tc->free_sequence,
                                           &freeseq_sb);
                        kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
                                  case_failed ? "FAILED" : "PASSED",
                                  tc->front_pages ? "front" : "back ",
                                  seq_buf_str(&tc->alignments_sb),
                                  seq_buf_str(&freeseq_sb));
                }

                return;
        }
        for (i = 0; i < BUFFER_NUM; i++) {
                if (is_dup(tc->free_sequence, index, i))
                        continue;
                tc->free_sequence[index] = i;
                permute_frees(test, alloc, tc, runs, failures, index + 1, end);
        }
}
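
/*
 * Straightforward backtracking: each recursion level picks one
 * not-yet-used buffer index via is_dup(), so the leaf is reached once
 * per permutation, i.e. 5! == 120 runs per buffer-size layout.
 */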

static void gen_buf_sizes(struct kunit *test,
                          struct binder_alloc *alloc,
                          struct binder_alloc_test_case_info *tc,
                          size_t *end_offset, unsigned long *runs,
                          unsigned long *failures)
{
        size_t last_offset, offset = 0;
        size_t front_sizes[BUFFER_NUM];
        size_t back_sizes[BUFFER_NUM];
        int seq[BUFFER_NUM] = {0};
        int i;

        tc->free_sequence = seq;
        for (i = 0; i < BUFFER_NUM; i++) {
                last_offset = offset;
                offset = end_offset[i];
                front_sizes[i] = offset - last_offset;
                back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
        }
        back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];

        /*
         * Buffers share the first or last few pages.
         * Only BUFFER_NUM - 1 buffer sizes are adjustable since
         * we need one giant buffer before getting to the last page.
         */
        tc->front_pages = true;
        tc->buffer_sizes = front_sizes;
        permute_frees(test, alloc, tc, runs, failures, 0,
                      end_offset[BUFFER_NUM - 1]);

        tc->front_pages = false;
        tc->buffer_sizes = back_sizes;
        permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
}
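
/*
 * back_sizes mirrors front_sizes and pads its first buffer with the
 * leftover space up to buffer_size, so each layout is tested once
 * crowded against the start of the mmap region and once against its
 * end (the "2 places to share pages" in TOTAL_EXHAUSTIVE_CASES).
 */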

static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
                            size_t *end_offset, int *alignments,
                            unsigned long *runs, unsigned long *failures,
                            int index)
{
        size_t end, prev;
        int align;

        if (index == BUFFER_NUM) {
                struct binder_alloc_test_case_info tc = {0};

                seq_buf_init(&tc.alignments_sb, tc.alignments,
                             ALIGNMENTS_BUFLEN);
                stringify_alignments(test, alignments, &tc.alignments_sb);

                gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
                return;
        }
        prev = index == 0 ? 0 : end_offset[index - 1];
        end = prev;

        BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

        for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
                if (align % 2)
                        end = ALIGN(end, PAGE_SIZE);
                else
                        end += BUFFER_MIN_SIZE;
                end_offset[index] = end;
                alignments[index] = align;
                gen_buf_offsets(test, alloc, end_offset, alignments, runs,
                                failures, index + 1);
        }
}
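
/*
 * Recursing five levels deep with five alignment choices per level
 * yields the 5^5 end-offset combinations. "end" carries over between
 * loop iterations, which is what turns the five choices into
 * progressively later endpoints; the BUILD_BUG_ON guarantees that the
 * BUFFER_MIN_SIZE steps alone can never overflow a single page.
 */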

struct binder_alloc_test {
        struct binder_alloc alloc;
        struct list_lru binder_test_freelist;
        struct file *filp;
        unsigned long mmap_uaddr;
};

static void binder_alloc_test_init_freelist(struct kunit *test)
{
        struct binder_alloc_test *priv = test->priv;

        KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
                            &priv->binder_test_freelist);
}

static void binder_alloc_test_mmap(struct kunit *test)
{
        struct binder_alloc_test *priv = test->priv;
        struct binder_alloc *alloc = &priv->alloc;
        struct binder_buffer *buf;
        struct rb_node *n;

        KUNIT_EXPECT_EQ(test, alloc->mapped, true);
        KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

        n = rb_first(&alloc->allocated_buffers);
        KUNIT_EXPECT_PTR_EQ(test, n, NULL);

        n = rb_first(&alloc->free_buffers);
        buf = rb_entry(n, struct binder_buffer, rb_node);
        KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
                        BINDER_MMAP_SIZE);
        KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}

/**
 * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
 * @test: The test context object.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page() is called.
 */
static void binder_alloc_exhaustive_test(struct kunit *test)
{
        struct binder_alloc_test *priv = test->priv;
        size_t end_offset[BUFFER_NUM];
        int alignments[BUFFER_NUM];
        unsigned long failures = 0;
        unsigned long runs = 0;

        gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
                        &failures, 0);

        KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
        KUNIT_EXPECT_EQ(test, failures, 0);
}

/* ===== End test cases ===== */

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
        struct binder_alloc *alloc = vma->vm_private_data;

        binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
        .close = binder_alloc_test_vma_close,
        .fault = binder_vm_fault,
};

static int binder_alloc_test_mmap_handler(struct file *filp,
                                          struct vm_area_struct *vma)
{
        struct binder_alloc *alloc = filp->private_data;

        vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

        vma->vm_ops = &binder_alloc_test_vm_ops;
        vma->vm_private_data = alloc;

        return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
        .mmap = binder_alloc_test_mmap_handler,
};
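
/*
 * The anon inode plus these fops stand in for /dev/binder: the mmap
 * handler applies the same VM flags and vm_ops wiring that the binder
 * driver's binder_mmap() sets up before handing the vma to
 * binder_alloc_mmap_handler().
 */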

static int binder_alloc_test_init(struct kunit *test)
{
        struct binder_alloc_test *priv;
        int ret;

        priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        test->priv = priv;

        ret = list_lru_init(&priv->binder_test_freelist);
        if (ret) {
                kunit_err(test, "Failed to initialize test freelist\n");
                return ret;
        }

        /* __binder_alloc_init requires mm to be attached */
        ret = kunit_attach_mm();
        if (ret) {
                kunit_err(test, "Failed to attach mm\n");
                return ret;
        }
        __binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

        priv->filp = anon_inode_getfile("binder_alloc_kunit",
                                        &binder_alloc_test_fops, &priv->alloc,
                                        O_RDWR | O_CLOEXEC);
        if (IS_ERR_OR_NULL(priv->filp)) {
                kunit_err(test, "Failed to open binder alloc test driver file\n");
                return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
        }

        priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
                                         PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
                                         0);
        if (!priv->mmap_uaddr) {
                kunit_err(test, "Could not map the test's transaction memory\n");
                return -ENOMEM;
        }

        return 0;
}
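
/*
 * kunit_attach_mm() and kunit_vm_mmap() are KUnit helpers that give the
 * kthread running the test an mm and a real userspace mapping, so the
 * allocator can install pages without a userspace process involved.
 */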

static void binder_alloc_test_exit(struct kunit *test)
{
        struct binder_alloc_test *priv = test->priv;

        /* Close the backing file to make sure binder_alloc_vma_close runs */
        if (!IS_ERR_OR_NULL(priv->filp))
                fput(priv->filp);

        if (priv->alloc.mm)
                binder_alloc_deferred_release(&priv->alloc);

        /* Make sure freelist is empty */
        KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
        list_lru_destroy(&priv->binder_test_freelist);
}

static struct kunit_case binder_alloc_test_cases[] = {
        KUNIT_CASE(binder_alloc_test_init_freelist),
        KUNIT_CASE(binder_alloc_test_mmap),
        KUNIT_CASE(binder_alloc_exhaustive_test),
        {}
};

static struct kunit_suite binder_alloc_test_suite = {
        .name = "binder_alloc",
        .test_cases = binder_alloc_test_cases,
        .init = binder_alloc_test_init,
        .exit = binder_alloc_test_exit,
};

kunit_test_suite(binder_alloc_test_suite);

MODULE_AUTHOR("Tiffany Yang <[email protected]>");
MODULE_DESCRIPTION("Binder Alloc KUnit tests");
MODULE_LICENSE("GPL");
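
/*
 * The suite can be run with kunit_tool, e.g.:
 *
 *   ./tools/testing/kunit/kunit.py run 'binder_alloc.*'
 *
 * assuming this test's Kconfig option (e.g.
 * CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST; see drivers/android/Kconfig
 * for the exact symbol) is enabled in the chosen .kunitconfig.
 */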