Path: blob/master/drivers/android/tests/binder_alloc_kunit.c
26426 views
// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for binder allocator code.
 *
 * Copyright 2025 Google LLC.
 * Author: Tiffany Yang <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <kunit/test.h>
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/seq_buf.h>
#include <linux/sizes.h>

#include "../binder_alloc.h"
#include "../binder_internal.h"

MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define BINDER_MMAP_SIZE SZ_128K

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

/* Room for BUFFER_NUM "[d]" entries plus the NUL terminator. */
#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)

#define ALIGN_TYPE_STRLEN (12)

/* Room for BUFFER_NUM "[ d:<type> ]" entries plus the NUL terminator. */
#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)

/* Set non-zero to log every case, not just failures. */
#define PRINT_ALL_CASES (0)

/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to previous buffer by addr.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	/**
	 * @LOOP_END: The number of enum values in &buf_end_align_type.
	 * It is used for controlling loop termination.
	 */
	LOOP_END,
};

/* Fixed-width labels (ALIGN_TYPE_STRLEN chars) for case logging. */
static const char *const buf_end_align_type_strs[LOOP_END] = {
	[SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
	[SAME_PAGE_ALIGNED] = " SP_ALIGNED ",
	[NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
	[NEXT_PAGE_ALIGNED] = " NP_ALIGNED ",
	[NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
};

/* Per-case state threaded through the exhaustive test helpers. */
struct binder_alloc_test_case_info {
	char alignments[ALIGNMENTS_BUFLEN];
	struct seq_buf alignments_sb;
	size_t *buffer_sizes;
	int *free_sequence;
	bool front_pages;
};

/* Render a free order (e.g. "[0][2][1]...") into @sb for logging. */
static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *sb)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		seq_buf_printf(sb, "[%d]", seq[i]);

	KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
}

/* Render the per-buffer alignment types into @sb for logging. */
static void stringify_alignments(struct kunit *test, int *alignments,
				 struct seq_buf *sb)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		seq_buf_printf(sb, "[ %d:%s ]", i,
			       buf_end_align_type_strs[alignments[i]]);

	KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
}

/*
 * Verify that every page backing @buffer is installed and not sitting
 * on the freelist lru. Returns false (and logs) on the first bad page.
 */
static bool check_buffer_pages_allocated(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		if (!alloc->pages[page_index] ||
		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
			kunit_err(test, "expect alloc but is %s at page index %d\n",
				  alloc->pages[page_index] ?
				  "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

/*
 * Allocate all BUFFER_NUM buffers with the given sizes and check their
 * pages; returns the number of buffers that failed either step.
 */
static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
						 struct binder_alloc *alloc,
						 struct binder_buffer *buffers[],
						 size_t *sizes, int *seq)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(test, alloc, buffers[i],
						  sizes[i]))
			failures++;
	}

	return failures;
}

/*
 * Free all buffers in the order given by @seq, then check that every
 * page up to @end landed on the lru; returns the number of pages that
 * did not.
 */
static unsigned long binder_alloc_test_free_buf(struct kunit *test,
						struct binder_alloc *alloc,
						struct binder_buffer *buffers[],
						size_t *sizes, int *seq,
						size_t end)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
		if (list_empty(page_to_lru(alloc->pages[i]))) {
			kunit_err(test, "expect lru but is %s at page index %d\n",
				  alloc->pages[i] ? "alloc" : "free", i);
			failures++;
		}
	}

	return failures;
}

/*
 * Drain the freelist via binder_alloc_free_page() and verify every
 * page of the mapping was actually released; returns the number of
 * pages still installed.
 */
static unsigned long binder_alloc_test_free_page(struct kunit *test,
						 struct binder_alloc *alloc)
{
	unsigned long failures = 0;
	unsigned long count;
	int i;

	while ((count = list_lru_count(alloc->freelist))) {
		list_lru_walk(alloc->freelist, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i]) {
			kunit_err(test, "expect free but is %s at page index %d\n",
				  list_empty(page_to_lru(alloc->pages[i])) ?
				  "alloc" : "lru", i);
			failures++;
		}
	}

	return failures;
}

/* Executes one full test run for the given test case. */
static bool binder_alloc_test_alloc_free(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_alloc_test_case_info *tc,
					 size_t end)
{
	unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
	struct binder_buffer *buffers[BUFFER_NUM];
	unsigned long failures;
	bool failed = false;

	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
					       tc->buffer_sizes,
					       tc->free_sequence);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Initial allocation failed: %lu/%u buffers with errors",
			    failures, BUFFER_NUM);

	failures = binder_alloc_test_free_buf(test, alloc, buffers,
					      tc->buffer_sizes,
					      tc->free_sequence, end);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
			    failures, pages);

	/* Allocate from lru. */
	failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
					       tc->buffer_sizes,
					       tc->free_sequence);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Reallocation failed: %lu/%u buffers with errors",
			    failures, BUFFER_NUM);

	failures = list_lru_count(alloc->freelist);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "lru list should be empty after reallocation but still has %lu pages",
			    failures);

	failures = binder_alloc_test_free_buf(test, alloc, buffers,
					      tc->buffer_sizes,
					      tc->free_sequence, end);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
			    failures, pages);

	failures = binder_alloc_test_free_page(test, alloc);
	failed = failed || failures;
	KUNIT_EXPECT_EQ_MSG(test, failures, 0,
			    "Failed to clean up allocated pages: %lu/%lu pages still installed",
			    failures, (alloc->buffer_size / PAGE_SIZE));

	return failed;
}

/* True if @val already appears in seq[0..index). */
static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  unsigned long *runs, unsigned long *failures,
			  int index, size_t end)
{
	bool case_failed;
	int i;

	if (index == BUFFER_NUM) {
		DECLARE_SEQ_BUF(freeseq_sb, FREESEQ_BUFLEN);

		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
		*runs += 1;
		*failures += case_failed;

		if (case_failed || PRINT_ALL_CASES) {
			stringify_free_seq(test, tc->free_sequence,
					   &freeseq_sb);
			kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
				  case_failed ? "FAILED" : "PASSED",
				  tc->front_pages ? "front" : "back ",
				  seq_buf_str(&tc->alignments_sb),
				  seq_buf_str(&freeseq_sb));
		}

		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(tc->free_sequence, index, i))
			continue;
		tc->free_sequence[index] = i;
		permute_frees(test, alloc, tc, runs, failures, index + 1, end);
	}
}

/*
 * Derive buffer sizes from the end offsets, then run the free-order
 * permutations twice: once with the buffers packed at the front of the
 * mapping and once mirrored to the back.
 */
static void gen_buf_sizes(struct kunit *test,
			  struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  size_t *end_offset, unsigned long *runs,
			  unsigned long *failures)
{
	size_t last_offset, offset = 0;
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	int seq[BUFFER_NUM] = {0};
	int i;

	tc->free_sequence = seq;
	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];

	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	tc->front_pages = true;
	tc->buffer_sizes = front_sizes;
	permute_frees(test, alloc, tc, runs, failures, 0,
		      end_offset[BUFFER_NUM - 1]);

	tc->front_pages = false;
	tc->buffer_sizes = back_sizes;
	permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
}

/*
 * Recursively enumerate every combination of end alignments for the
 * BUFFER_NUM buffers; at full depth, hand the resulting end offsets to
 * gen_buf_sizes().
 */
static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
			    size_t *end_offset, int *alignments,
			    unsigned long *runs, unsigned long *failures,
			    int index)
{
	size_t end, prev;
	int align;

	if (index == BUFFER_NUM) {
		struct binder_alloc_test_case_info tc = {0};

		seq_buf_init(&tc.alignments_sb, tc.alignments,
			     ALIGNMENTS_BUFLEN);
		stringify_alignments(test, alignments, &tc.alignments_sb);

		gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		/* Odd types are page aligned; even types add a sub-page step. */
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		alignments[index] = align;
		gen_buf_offsets(test, alloc, end_offset, alignments, runs,
				failures, index + 1);
	}
}

/* Per-test fixture, stored in test->priv. */
struct binder_alloc_test {
	struct binder_alloc alloc;
	struct list_lru binder_test_freelist;
	struct file *filp;
	unsigned long mmap_uaddr;
};

/* Check the allocator was wired to the test-private freelist at init. */
static void binder_alloc_test_init_freelist(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
			    &priv->binder_test_freelist);
}

/* Check the mmap done in init: mapped flag, size, and one big free buffer. */
static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_PTR_EQ(test, n, NULL);

	n = rb_first(&alloc->free_buffers);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}

/**
 * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
 * @test: The test context object.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page() is called.
 */
static void binder_alloc_exhaustive_test(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	size_t end_offset[BUFFER_NUM];
	int alignments[BUFFER_NUM];
	unsigned long failures = 0;
	unsigned long runs = 0;

	gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
			&failures, 0);

	KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
	KUNIT_EXPECT_EQ(test, failures, 0);
}

/* ===== End test cases ===== */

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
	.fault = binder_vm_fault,
};

/* Minimal stand-in for the binder driver's mmap handler. */
static int binder_alloc_test_mmap_handler(struct file *filp,
					  struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_alloc_test_vm_ops;
	vma->vm_private_data = alloc;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};

/*
 * Suite init: build the fixture — freelist, mm, allocator, backing
 * anon file, and the BINDER_MMAP_SIZE mapping the tests operate on.
 */
static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	ret = list_lru_init(&priv->binder_test_freelist);
	if (ret) {
		kunit_err(test, "Failed to initialize test freelist\n");
		return ret;
	}

	/* __binder_alloc_init requires mm to be attached */
	ret = kunit_attach_mm();
	if (ret) {
		kunit_err(test, "Failed to attach mm\n");
		return ret;
	}
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	priv->filp = anon_inode_getfile("binder_alloc_kunit",
					&binder_alloc_test_fops, &priv->alloc,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR_OR_NULL(priv->filp)) {
		kunit_err(test, "Failed to open binder alloc test driver file\n");
		return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
	}

	priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
					 PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
					 0);
	if (!priv->mmap_uaddr) {
		kunit_err(test, "Could not map the test's transaction memory\n");
		return -ENOMEM;
	}

	return 0;
}

/* Suite exit: tear down the fixture and verify nothing leaked. */
static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Close the backing file to make sure binder_alloc_vma_close runs */
	if (!IS_ERR_OR_NULL(priv->filp))
		fput(priv->filp);

	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);

	/* Make sure freelist is empty */
	KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
	list_lru_destroy(&priv->binder_test_freelist);
}

static struct kunit_case binder_alloc_test_cases[] = {
	KUNIT_CASE(binder_alloc_test_init_freelist),
	KUNIT_CASE(binder_alloc_test_mmap),
	KUNIT_CASE(binder_alloc_exhaustive_test),
	{}
};

static struct kunit_suite binder_alloc_test_suite = {
	.name = "binder_alloc",
	.test_cases = binder_alloc_test_cases,
	.init = binder_alloc_test_init,
	.exit = binder_alloc_test_exit,
};

kunit_test_suite(binder_alloc_test_suite);

MODULE_AUTHOR("Tiffany Yang <[email protected]>");
MODULE_DESCRIPTION("Binder Alloc KUnit tests");
MODULE_LICENSE("GPL");