// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include <kunit/visibility.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static struct list_lru binder_freelist;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

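/*
 * Buffers are laid out back to back in the mapped region, so a buffer's
 * size is implicit: the gap from its user_data to the next buffer's
 * user_data, or to the end of the region for the last buffer.
 */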
VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
						 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_buffer_size);

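/*
 * Insert @new_buffer into the rbtree of free buffers, which is kept
 * sorted by size so that binder_alloc_new_buf_locked() can perform a
 * best-fit search.
 */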
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer while it is in use by the
			 * kernel or after it has already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Searches the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	guard(mutex)(&alloc->mutex);
	return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}

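/*
 * alloc->pages[] entries are read outside alloc->mutex in some paths,
 * so installing and looking up a page goes through a release/acquire
 * pair: a reader that observes the pointer also sees the fully
 * initialized page.
 */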
static inline void
binder_set_installed_page(struct binder_alloc *alloc,
			  unsigned long index,
			  struct page *page)
{
	/* Pairs with acquire in binder_get_installed_page() */
	smp_store_release(&alloc->pages[index], page);
}

static inline struct page *
binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
	/* Pairs with release in binder_set_installed_page() */
	return smp_load_acquire(&alloc->pages[index]);
}

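/*
 * Park the pages backing [start, end) on the LRU freelist. The pages
 * are not freed here; they stay installed and are reclaimed later by
 * the shrinker via binder_alloc_free_page().
 */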
static void binder_lru_freelist_add(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	unsigned long page_addr;
	struct page *page;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		page = binder_get_installed_page(alloc, index);
		if (!page)
			continue;

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(alloc->freelist,
				   page_to_lru(page),
				   page_to_nid(page),
				   NULL);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

static inline
void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
	smp_store_release(&alloc->mapped, state);
}

static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_mapped() */
	return smp_load_acquire(&alloc->mapped);
}

static struct page *binder_page_lookup(struct binder_alloc *alloc,
				       unsigned long addr)
{
	struct mm_struct *mm = alloc->mm;
	struct page *page;
	long npages = 0;

	/*
	 * Find an existing page in the remote mm. If it is missing,
	 * don't attempt to fault it in; just propagate an error.
	 */
	mmap_read_lock(mm);
	if (binder_alloc_is_mapped(alloc))
		npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
					       &page, NULL);
	mmap_read_unlock(mm);

	return npages > 0 ? page : NULL;
}

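/*
 * Map @page at @addr in the target mm, trying the cheaper per-VMA lock
 * before falling back to mmap_lock. Returns -ESRCH when the VMA is gone
 * or no longer belongs to this binder_alloc mapping.
 */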
static int binder_page_insert(struct binder_alloc *alloc,
			      unsigned long addr,
			      struct page *page)
{
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	int ret = -ESRCH;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		if (binder_alloc_is_mapped(alloc))
			ret = vm_insert_page(vma, addr, page);
		vma_end_read(vma);
		return ret;
	}

	/* fall back to mmap_lock */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma && binder_alloc_is_mapped(alloc))
		ret = vm_insert_page(vma, addr, page);
	mmap_read_unlock(mm);

	return ret;
}

static struct page *binder_page_alloc(struct binder_alloc *alloc,
				      unsigned long index)
{
	struct binder_shrinker_mdata *mdata;
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	/* allocate and install shrinker metadata under page->private */
	mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
	if (!mdata) {
		__free_page(page);
		return NULL;
	}

	mdata->alloc = alloc;
	mdata->page_index = index;
	INIT_LIST_HEAD(&mdata->lru);
	set_page_private(page, (unsigned long)mdata);

	return page;
}

static void binder_free_page(struct page *page)
{
	kfree((struct binder_shrinker_mdata *)page_private(page));
	__free_page(page);
}

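/*
 * Allocate a fresh zeroed page and install it at @addr in the target
 * mm. Concurrent installers are tolerated: on -EBUSY the local page is
 * discarded and the page that won the race is recorded instead.
 */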
static int binder_install_single_page(struct binder_alloc *alloc,
				      unsigned long index,
				      unsigned long addr)
{
	struct page *page;
	int ret;

	if (!mmget_not_zero(alloc->mm))
		return -ESRCH;

	page = binder_page_alloc(alloc, index);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = binder_page_insert(alloc, addr, page);
	switch (ret) {
	case -EBUSY:
		/*
		 * EBUSY is ok. Someone installed the pte first but the
		 * alloc->pages[index] has not been updated yet. Discard
		 * our page and look up the one already installed.
		 */
		ret = 0;
		binder_free_page(page);
		page = binder_page_lookup(alloc, addr);
		if (!page) {
			pr_err("%d: failed to find page at offset %lx\n",
			       alloc->pid, addr - alloc->vm_start);
			ret = -ESRCH;
			break;
		}
		fallthrough;
	case 0:
		/* Mark page installation complete and safe to use */
		binder_set_installed_page(alloc, index, page);
		break;
	default:
		binder_free_page(page);
		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
		       alloc->pid, __func__, addr - alloc->vm_start, ret);
		break;
	}
out:
	mmput_async(alloc->mm);
	return ret;
}

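/*
 * Make sure every page spanned by @buffer is installed in
 * alloc->pages[], skipping pages already present (e.g. pages shared
 * with a neighboring buffer).
 */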
static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	unsigned long start, final;
	unsigned long page_addr;

	start = buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN(buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index;
		int ret;

		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		if (binder_get_installed_page(alloc, index))
			continue;

		trace_binder_alloc_page_start(alloc, index);

		ret = binder_install_single_page(alloc, index, page_addr);
		if (ret)
			return ret;

		trace_binder_alloc_page_end(alloc, index);
	}

	return 0;
}

/* The range of pages should exclude those shared with other buffers */
static void binder_lru_freelist_del(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	unsigned long page_addr;
	struct page *page;

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		unsigned long index;
		bool on_lru;

		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		page = binder_get_installed_page(alloc, index);

		if (page) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(alloc->freelist,
					      page_to_lru(page),
					      page_to_nid(page),
					      NULL);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
	}
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the number and total size of buffers allocated by the current
	 * caller. The idea is that once we cross the threshold, whoever is
	 * responsible for the low async space is likely to try to send
	 * another async txn, and at some point we'll catch them in the act.
	 * This is more efficient than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/*
	 * Only start detecting spammers once we have less than 20% of async
	 * space left (which is less than 10% of total buffer size).
	 */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
				   alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

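/*
 * Allocation is best-fit: walk the size-sorted free tree for the
 * smallest free buffer that is large enough, splitting off the tail as
 * a new free buffer when the match is oversized.
 */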
/* Callers preallocate @new_buffer; it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				struct binder_buffer *new_buffer,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;
	struct binder_buffer *buffer;
	unsigned long next_used_page;
	unsigned long curr_last_page;
	size_t buffer_size;

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	if (buffer_size != size) {
		/* Found an oversized buffer that needs to be split */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		WARN_ON(n || buffer_size == size);
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	/*
	 * Now we remove the pages from the freelist. A clever calculation
	 * with buffer_size determines if the last page is shared with an
	 * adjacent in-use buffer. In that case, the page has already been
	 * removed from the freelist, so we trim our range short.
	 */
	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
				min(next_used_page, curr_last_page));

	rb_erase(&buffer->rb_node, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
		if (debug_low_async_space_locked(alloc))
			buffer->oneway_spam_suspect = true;
	}

out:
	/* Discard the possibly unused new_buffer */
	kfree(new_buffer);
	return buffer;
}

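/*
 * Worked example, assuming 8-byte pointers: data_size = 28,
 * offsets_size = 8 and extra_buffers_size = 0 yield
 * ALIGN(28, 8) + ALIGN(8, 8) = 32 + 8 = 40 bytes. A 0:0:0 request is
 * padded up to sizeof(void *) so the buffer still gets a unique address.
 */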
/* Calculate the sanitized total size; returns 0 for an invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc: binder_alloc for this proc
 * @data_size: size of user data buffer
 * @offsets_size: size of the user-specified offsets section
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async: buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer, *next;
	size_t size;
	int ret;

	/* Check that binder_alloc is fully initialized */
	if (!binder_alloc_is_mapped(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
	if (unlikely(!size)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
				   alloc->pid, data_size, offsets_size,
				   extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}

	/* Preallocate the next buffer */
	next = kzalloc(sizeof(*next), GFP_KERNEL);
	if (!next)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
	if (IS_ERR(buffer)) {
		mutex_unlock(&alloc->mutex);
		goto out;
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	mutex_unlock(&alloc->mutex);

	ret = binder_install_buffer_pages(alloc, buffer, size);
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);
		buffer = ERR_PTR(ret);
	}
out:
	return buffer;
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_new_buf);

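/*
 * Page-boundary helpers: buffer_start_page() returns the page containing
 * the first byte of @buffer; prev_buffer_end_page() returns the page
 * containing the byte just before @buffer's start (user_data - 1).
 */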
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (buffer->user_data - 1) & PAGE_MASK;
}

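/*
 * Unlink @buffer and free its struct. If @buffer starts mid-page, its
 * start page is shared with a neighbor and can only be handed to the
 * LRU freelist once neither the previous nor the next buffer still
 * touches that page.
 */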
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next;

	if (PAGE_ALIGNED(buffer->user_data))
		goto skip_freelist;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
		goto skip_freelist;

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer))
			goto skip_freelist;
	}

	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
				buffer_start_page(buffer) + PAGE_SIZE);
skip_freelist:
	list_del(&buffer->entry);
	kfree(buffer);
}

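/*
 * Return @buffer to the free tree, parking any fully covered pages on
 * the LRU freelist and merging @buffer with free neighbors so adjacent
 * free space coalesces into a single buffer.
 */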
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->vm_start);
	BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
				(buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->vm_start);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;

	*pgoffp = pgoff;

	return alloc->pages[index];
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_buf);

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 * 0 = success
 * -EBUSY = address space already mapped
 * -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	struct binder_buffer *buffer;
	const char *failure_string;
	int ret;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->vm_start = vma->vm_start;

	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
				sizeof(alloc->pages[0]),
				GFP_KERNEL);
	if (!alloc->pages) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->vm_start;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_mapped(alloc, true);

	return 0;

err_alloc_buf_struct_failed:
	kvfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->vm_start = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_mmap_handler);

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->mapped);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			struct page *page;
			bool on_lru;

			page = binder_get_installed_page(alloc, i);
			if (!page)
				continue;

			on_lru = list_lru_del(alloc->freelist,
					      page_to_lru(page),
					      page_to_nid(page),
					      NULL);
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d %s\n",
					   __func__, alloc->pid, i,
					   on_lru ? "on lru" : "active");
			binder_free_page(page);
			page_count++;
		}
	}
	mutex_unlock(&alloc->mutex);
	kvfree(alloc->pages);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_deferred_release);

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;
	struct rb_node *n;

	guard(mutex)(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
			   buffer->debug_id,
			   buffer->user_data - alloc->vm_start,
			   buffer->data_size, buffer->offsets_size,
			   buffer->extra_buffers_size,
			   buffer->transaction ? "active" : "delivered");
	}
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_is_mapped(alloc)) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = binder_get_installed_page(alloc, i);
			if (!page)
				free++;
			else if (list_empty(page_to_lru(page)))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	guard(mutex)(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->mapped to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_mapped(alloc, false);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item: item to free
 * @lru: list_lru instance of the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg)
	__must_hold(&lru->lock)
{
	struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
	struct binder_alloc *alloc = mdata->alloc;
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	struct page *page_to_free;
	unsigned long page_addr;
	int mm_locked = 0;
	size_t index;

	if (!mmget_not_zero(mm))
		goto err_mmget;

	index = mdata->page_index;
	page_addr = alloc->vm_start + index * PAGE_SIZE;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, page_addr);
	if (!vma) {
		/* fall back to mmap_lock */
		if (!mmap_read_trylock(mm))
			goto err_mmap_read_lock_failed;
		mm_locked = 1;
		vma = vma_lookup(mm, page_addr);
	}

	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	/*
	 * Since a binder_alloc can only be mapped once, we ensure
	 * the vma corresponds to this mapping by checking whether
	 * the binder_alloc is still mapped.
	 */
	if (vma && !binder_alloc_is_mapped(alloc))
		goto err_invalid_vma;

	trace_binder_unmap_kernel_start(alloc, index);

	page_to_free = alloc->pages[index];
	binder_set_installed_page(alloc, index, NULL);

	trace_binder_unmap_kernel_end(alloc, index);

	list_lru_isolate(lru, item);
	spin_unlock(&lru->lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}

	mutex_unlock(&alloc->mutex);
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
	mmput_async(mm);
	binder_free_page(page_to_free);

	return LRU_REMOVED_RETRY;

err_invalid_vma:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	if (mm_locked)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
	return LRU_SKIP;
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_page);

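/* Shrinker callbacks: report and reclaim pages parked on the LRU freelist */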
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_freelist);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}

static struct shrinker *binder_shrinker;

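/*
 * Common initialization helper. The freelist is passed in so that
 * callers other than binder_alloc_init(), such as the KUnit tests, can
 * supply their own list_lru instead of the global binder_freelist.
 */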
VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc,
					  struct list_lru *freelist)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
	alloc->freelist = freelist;
}
EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	__binder_alloc_init(alloc, &binder_freelist);
}

int binder_alloc_shrinker_init(void)
{
	int ret;

	ret = list_lru_init(&binder_freelist);
	if (ret)
		return ret;

	binder_shrinker = shrinker_alloc(0, "android-binder");
	if (!binder_shrinker) {
		list_lru_destroy(&binder_freelist);
		return -ENOMEM;
	}

	binder_shrinker->count_objects = binder_shrink_count;
	binder_shrinker->scan_objects = binder_shrink_scan;

	shrinker_register(binder_shrinker);

	return 0;
}

void binder_alloc_shrinker_exit(void)
{
	shrinker_free(binder_shrinker);
	list_lru_destroy(&binder_freelist);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_copy_user_to_buffer() - copy from user space into the buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

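/*
 * Copy between a kernel buffer @ptr and a binder buffer, one physical
 * page at a time; @to_buffer selects the copy direction.
 */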
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}