/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
 */

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry:              entry alloc->buffers
 * @rb_node:            node for allocated_buffers/free_buffers rb trees
 * @free:               %true if buffer is free
 * @clear_on_free:      %true if buffer must be zeroed after use
 * @allow_user_free:    %true if user is allowed to free buffer
 * @async_transaction:  %true if buffer is in use for an async txn
 * @oneway_spam_suspect: %true if total async allocate size just exceed
 *                      spamming detect threshold
 * @debug_id:           unique ID for debugging
 * @transaction:        pointer to associated struct binder_transaction
 * @target_node:        struct binder_node associated with this buffer
 * @data_size:          size of @transaction data
 * @offsets_size:       size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data:          user pointer to base of buffer space
 * @pid:                pid to attribute the buffer to (caller)
 *
 * Bookkeeping structure for binder transaction buffers
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned oneway_spam_suspect:1;
	unsigned debug_id:27;
	struct binder_transaction *transaction;
	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	unsigned long user_data;
	int pid;
};

/**
 * struct binder_shrinker_mdata - binder metadata used to reclaim pages
 * @lru:        LRU entry in binder_freelist
 * @alloc:      binder_alloc owning the page to reclaim
 * @page_index: offset in @alloc->pages[] into the page to reclaim
 */
struct binder_shrinker_mdata {
	struct list_head lru;
	struct binder_alloc *alloc;
	unsigned long page_index;
};

/*
 * page_to_lru() - recover the shrinker LRU entry stashed in a page.
 *
 * The shrinker metadata pointer is stored in the page's private field
 * (read back here via page_private()); return the embedded LRU list head.
 */
static inline struct list_head *page_to_lru(struct page *p)
{
	struct binder_shrinker_mdata *mdata;

	mdata = (struct binder_shrinker_mdata *)page_private(p);

	return &mdata->lru;
}

/**
 * struct binder_alloc - per-binder proc state for binder allocator
 * @mutex:              protects binder_alloc fields
 * @mm:                 copy of task->mm (invariant after open)
 * @vm_start:           base of per-proc address space mapped via mmap
 * @buffers:            list of all buffers for this proc
 * @free_buffers:       rb tree of buffers available for allocation
 *                      sorted by size
 * @allocated_buffers:  rb tree of allocated buffers sorted by address
 * @free_async_space:   VA space available for async buffers. This is
 *                      initialized at mmap time to 1/2 the full VA space
 * @pages:              array of struct page *
 * @freelist:           lru list to use for free pages (invariant after init)
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
 * @pages_high:         high watermark of offset in @pages
 * @mapped:             whether the vm area is mapped, each binder instance is
 *                      allowed a single mapping throughout its lifetime
 * @oneway_spam_detected: %true if oneway spam detection fired, clear that
 *                      flag once the async buffer has returned to a healthy state
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. It is normally initialized during binder_init() and binder_mmap()
 * calls. The address space is used for both user-visible buffers and for
 * struct binder_buffer objects used to track the user buffers
 */
struct binder_alloc {
	struct mutex mutex;
	struct mm_struct *mm;
	unsigned long vm_start;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;
	struct page **pages;
	struct list_lru *freelist;
	size_t buffer_size;
	int pid;
	size_t pages_high;
	bool mapped;
	bool oneway_spam_detected;
};

/*
 * Allocator entry points; implementations live in binder_alloc.c.
 * binder_alloc_free_page() is the list_lru walk callback used by the
 * shrinker to reclaim free pages.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async);
void binder_alloc_init(struct binder_alloc *alloc);
int binder_alloc_shrinker_init(void);
void binder_alloc_shrinker_exit(void);
void binder_alloc_vma_close(struct binder_alloc *alloc);
struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
			     unsigned long user_ptr);
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer);
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma);
void binder_alloc_deferred_release(struct binder_alloc *alloc);
int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
/* debugfs/procfs style dumpers into a seq_file */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc);

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc: binder_alloc for this proc
 *
 * Return: the bytes remaining in the address-space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	/* scoped lock (cleanup.h guard); released automatically on return */
	guard(mutex)(&alloc->mutex);
	return alloc->free_async_space;
}

/*
 * Copy helpers between a binder buffer and user/kernel memory.
 * binder_alloc_copy_user_to_buffer() returns the number of bytes NOT
 * copied (0 on success), mirroring copy_from_user() — see binder_alloc.c.
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes);

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes);

/* Internals exposed only for KUnit tests */
#if IS_ENABLED(CONFIG_KUNIT)
void __binder_alloc_init(struct binder_alloc *alloc, struct list_lru *freelist);
size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				struct binder_buffer *buffer);
#endif

#endif /* _LINUX_BINDER_ALLOC_H */