GitHub Repository: torvalds/linux
Path: blob/master/drivers/android/binder.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/* binder.c
3
*
4
* Android IPC Subsystem
5
*
6
* Copyright (C) 2007-2008 Google, Inc.
7
*/
8
9
/*
10
* Locking overview
11
*
12
* There are 3 main spinlocks which must be acquired in the
13
* order shown:
14
*
15
* 1) proc->outer_lock : protects binder_ref
16
* binder_proc_lock() and binder_proc_unlock() are
17
* used to acq/rel.
18
* 2) node->lock : protects most fields of binder_node.
19
* binder_node_lock() and binder_node_unlock() are
20
* used to acq/rel
21
* 3) proc->inner_lock : protects the thread and node lists
22
* (proc->threads, proc->waiting_threads, proc->nodes)
23
* and all todo lists associated with the binder_proc
24
* (proc->todo, thread->todo, proc->delivered_death and
25
* node->async_todo), as well as thread->transaction_stack
26
* binder_inner_proc_lock() and binder_inner_proc_unlock()
27
* are used to acq/rel
28
*
29
* Any lock under procA must never be nested under any lock at the same
30
* level or below on procB.
31
*
32
* Functions that require a lock held on entry indicate which lock
33
* in the suffix of the function name:
34
*
35
* foo_olocked() : requires node->outer_lock
36
* foo_nlocked() : requires node->lock
37
* foo_ilocked() : requires proc->inner_lock
38
* foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39
* foo_nilocked(): requires node->lock and proc->inner_lock
40
* ...
41
*/
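
/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * touching a node and its owning proc's todo list while respecting the
 * lock order documented above. binder_node_inner_lock() takes
 * node->lock first and then node->proc->inner_lock, and the matching
 * unlock releases them in reverse order.
 *
 *	binder_node_inner_lock(node);
 *	// ... read/update node fields, queue work on node->proc->todo ...
 *	binder_node_inner_unlock(node);
 */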
42
43
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45
#include <linux/fdtable.h>
46
#include <linux/file.h>
47
#include <linux/freezer.h>
48
#include <linux/fs.h>
49
#include <linux/list.h>
50
#include <linux/miscdevice.h>
51
#include <linux/module.h>
52
#include <linux/mutex.h>
53
#include <linux/nsproxy.h>
54
#include <linux/poll.h>
55
#include <linux/debugfs.h>
56
#include <linux/rbtree.h>
57
#include <linux/sched/signal.h>
58
#include <linux/sched/mm.h>
59
#include <linux/seq_file.h>
60
#include <linux/string.h>
61
#include <linux/uaccess.h>
62
#include <linux/pid_namespace.h>
63
#include <linux/security.h>
64
#include <linux/spinlock.h>
65
#include <linux/ratelimit.h>
66
#include <linux/syscalls.h>
67
#include <linux/task_work.h>
68
#include <linux/sizes.h>
69
#include <linux/ktime.h>
70
71
#include <kunit/visibility.h>
72
73
#include <uapi/linux/android/binder.h>
74
75
#include <linux/cacheflush.h>
76
77
#include "binder_netlink.h"
78
#include "binder_internal.h"
79
#include "binder_trace.h"
80
81
static HLIST_HEAD(binder_deferred_list);
82
static DEFINE_MUTEX(binder_deferred_lock);
83
84
static HLIST_HEAD(binder_devices);
85
static DEFINE_SPINLOCK(binder_devices_lock);
86
87
static HLIST_HEAD(binder_procs);
88
static DEFINE_MUTEX(binder_procs_lock);
89
90
static HLIST_HEAD(binder_dead_nodes);
91
static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92
93
static struct dentry *binder_debugfs_dir_entry_root;
94
static struct dentry *binder_debugfs_dir_entry_proc;
95
static atomic_t binder_last_id;
96
97
static int proc_show(struct seq_file *m, void *unused);
98
DEFINE_SHOW_ATTRIBUTE(proc);
99
100
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
101
102
enum {
103
BINDER_DEBUG_USER_ERROR = 1U << 0,
104
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
105
BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
106
BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
107
BINDER_DEBUG_DEAD_BINDER = 1U << 4,
108
BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
109
BINDER_DEBUG_READ_WRITE = 1U << 6,
110
BINDER_DEBUG_USER_REFS = 1U << 7,
111
BINDER_DEBUG_THREADS = 1U << 8,
112
BINDER_DEBUG_TRANSACTION = 1U << 9,
113
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
114
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
115
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
116
BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
117
BINDER_DEBUG_SPINLOCKS = 1U << 14,
118
};
119
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
120
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
121
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
122
123
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
124
module_param_named(devices, binder_devices_param, charp, 0444);
125
126
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
127
static int binder_stop_on_user_error;
128
129
static int binder_set_stop_on_user_error(const char *val,
130
const struct kernel_param *kp)
131
{
132
int ret;
133
134
ret = param_set_int(val, kp);
135
if (binder_stop_on_user_error < 2)
136
wake_up(&binder_user_error_wait);
137
return ret;
138
}
139
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
140
param_get_int, &binder_stop_on_user_error, 0644);
141
142
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
143
{
144
struct va_format vaf;
145
va_list args;
146
147
if (binder_debug_mask & mask) {
148
va_start(args, format);
149
vaf.va = &args;
150
vaf.fmt = format;
151
pr_info_ratelimited("%pV", &vaf);
152
va_end(args);
153
}
154
}
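
/*
 * Illustrative sketch (hypothetical call site): a binder_debug() message
 * is emitted (ratelimited) only when its class bit is set in the
 * debug_mask module parameter, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *		     "%d:%d open\n", proc->pid, current->pid);
 */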
155
156
#define binder_txn_error(x...) \
157
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
158
159
static __printf(1, 2) void binder_user_error(const char *format, ...)
160
{
161
struct va_format vaf;
162
va_list args;
163
164
if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
165
va_start(args, format);
166
vaf.va = &args;
167
vaf.fmt = format;
168
pr_info_ratelimited("%pV", &vaf);
169
va_end(args);
170
}
171
172
if (binder_stop_on_user_error)
173
binder_stop_on_user_error = 2;
174
}
175
176
#define binder_set_extended_error(ee, _id, _command, _param) \
177
do { \
178
(ee)->id = _id; \
179
(ee)->command = _command; \
180
(ee)->param = _param; \
181
} while (0)
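
/*
 * Illustrative sketch (hypothetical call site, assuming a thread with an
 * extended-error field as used elsewhere in this driver): recording why
 * a transaction failed so userspace can later query it.
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_DEAD_REPLY, -EINVAL);
 */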
182
183
#define to_flat_binder_object(hdr) \
184
container_of(hdr, struct flat_binder_object, hdr)
185
186
#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
187
188
#define to_binder_buffer_object(hdr) \
189
container_of(hdr, struct binder_buffer_object, hdr)
190
191
#define to_binder_fd_array_object(hdr) \
192
container_of(hdr, struct binder_fd_array_object, hdr)
193
194
static struct binder_stats binder_stats;
195
196
static inline void binder_stats_deleted(enum binder_stat_types type)
197
{
198
atomic_inc(&binder_stats.obj_deleted[type]);
199
}
200
201
static inline void binder_stats_created(enum binder_stat_types type)
202
{
203
atomic_inc(&binder_stats.obj_created[type]);
204
}
205
206
struct binder_transaction_log_entry {
207
int debug_id;
208
int debug_id_done;
209
int call_type;
210
int from_proc;
211
int from_thread;
212
int target_handle;
213
int to_proc;
214
int to_thread;
215
int to_node;
216
int data_size;
217
int offsets_size;
218
int return_error_line;
219
uint32_t return_error;
220
uint32_t return_error_param;
221
char context_name[BINDERFS_MAX_NAME + 1];
222
};
223
224
struct binder_transaction_log {
225
atomic_t cur;
226
bool full;
227
struct binder_transaction_log_entry entry[32];
228
};
229
230
static struct binder_transaction_log binder_transaction_log;
231
static struct binder_transaction_log binder_transaction_log_failed;
232
233
static struct binder_transaction_log_entry *binder_transaction_log_add(
234
struct binder_transaction_log *log)
235
{
236
struct binder_transaction_log_entry *e;
237
unsigned int cur = atomic_inc_return(&log->cur);
238
239
if (cur >= ARRAY_SIZE(log->entry))
240
log->full = true;
241
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
242
WRITE_ONCE(e->debug_id_done, 0);
243
/*
244
* write-barrier to synchronize access to e->debug_id_done.
245
* We make sure the initialized 0 value is seen before
246
* the other fields are zeroed by the memset() below.
247
*/
248
smp_wmb();
249
memset(e, 0, sizeof(*e));
250
return e;
251
}
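
/*
 * Illustrative sketch (hypothetical reader): the smp_wmb() above pairs
 * with a read barrier on the consumer side so a concurrently recycled
 * entry can be detected.
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	// the entry is only stable if debug_id && debug_id == e->debug_id
 */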
252
253
enum binder_deferred_state {
254
BINDER_DEFERRED_FLUSH = 0x01,
255
BINDER_DEFERRED_RELEASE = 0x02,
256
};
257
258
enum {
259
BINDER_LOOPER_STATE_REGISTERED = 0x01,
260
BINDER_LOOPER_STATE_ENTERED = 0x02,
261
BINDER_LOOPER_STATE_EXITED = 0x04,
262
BINDER_LOOPER_STATE_INVALID = 0x08,
263
BINDER_LOOPER_STATE_WAITING = 0x10,
264
BINDER_LOOPER_STATE_POLL = 0x20,
265
};
266
267
/**
268
* binder_proc_lock() - Acquire outer lock for given binder_proc
269
* @proc: struct binder_proc to acquire
270
*
271
* Acquires proc->outer_lock. Used to protect binder_ref
272
* structures associated with the given proc.
273
*/
274
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
275
static void
276
_binder_proc_lock(struct binder_proc *proc, int line)
277
__acquires(&proc->outer_lock)
278
{
279
binder_debug(BINDER_DEBUG_SPINLOCKS,
280
"%s: line=%d\n", __func__, line);
281
spin_lock(&proc->outer_lock);
282
}
283
284
/**
285
* binder_proc_unlock() - Release outer lock for given binder_proc
286
* @proc: struct binder_proc to release
287
*
288
* Release lock acquired via binder_proc_lock()
289
*/
290
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
291
static void
292
_binder_proc_unlock(struct binder_proc *proc, int line)
293
__releases(&proc->outer_lock)
294
{
295
binder_debug(BINDER_DEBUG_SPINLOCKS,
296
"%s: line=%d\n", __func__, line);
297
spin_unlock(&proc->outer_lock);
298
}
299
300
/**
301
* binder_inner_proc_lock() - Acquire inner lock for given binder_proc
302
* @proc: struct binder_proc to acquire
303
*
304
* Acquires proc->inner_lock. Used to protect todo lists
305
*/
306
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
307
static void
308
_binder_inner_proc_lock(struct binder_proc *proc, int line)
309
__acquires(&proc->inner_lock)
310
{
311
binder_debug(BINDER_DEBUG_SPINLOCKS,
312
"%s: line=%d\n", __func__, line);
313
spin_lock(&proc->inner_lock);
314
}
315
316
/**
317
* binder_inner_proc_unlock() - Release inner lock for given binder_proc
318
* @proc: struct binder_proc to release
319
*
320
* Release lock acquired via binder_inner_proc_lock()
321
*/
322
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
323
static void
324
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
325
__releases(&proc->inner_lock)
326
{
327
binder_debug(BINDER_DEBUG_SPINLOCKS,
328
"%s: line=%d\n", __func__, line);
329
spin_unlock(&proc->inner_lock);
330
}
331
332
/**
333
* binder_node_lock() - Acquire spinlock for given binder_node
334
* @node: struct binder_node to acquire
335
*
336
* Acquires node->lock. Used to protect binder_node fields
337
*/
338
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
339
static void
340
_binder_node_lock(struct binder_node *node, int line)
341
__acquires(&node->lock)
342
{
343
binder_debug(BINDER_DEBUG_SPINLOCKS,
344
"%s: line=%d\n", __func__, line);
345
spin_lock(&node->lock);
346
}
347
348
/**
349
* binder_node_unlock() - Release spinlock for given binder_node
350
* @node: struct binder_node to release
351
*
352
* Release lock acquired via binder_node_lock()
353
*/
354
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
355
static void
356
_binder_node_unlock(struct binder_node *node, int line)
357
__releases(&node->lock)
358
{
359
binder_debug(BINDER_DEBUG_SPINLOCKS,
360
"%s: line=%d\n", __func__, line);
361
spin_unlock(&node->lock);
362
}
363
364
/**
365
* binder_node_inner_lock() - Acquire node and inner locks
366
* @node: struct binder_node to acquire
367
*
368
* Acquires node->lock. If node->proc is non-NULL, also acquires
369
* proc->inner_lock. Used to protect binder_node fields
370
*/
371
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
372
static void
373
_binder_node_inner_lock(struct binder_node *node, int line)
374
__acquires(&node->lock) __acquires(&node->proc->inner_lock)
375
{
376
binder_debug(BINDER_DEBUG_SPINLOCKS,
377
"%s: line=%d\n", __func__, line);
378
spin_lock(&node->lock);
379
if (node->proc)
380
binder_inner_proc_lock(node->proc);
381
else
382
/* annotation for sparse */
383
__acquire(&node->proc->inner_lock);
384
}
385
386
/**
387
* binder_node_inner_unlock() - Release node and inner locks
388
* @node: struct binder_node to release
389
*
390
* Release locks acquired via binder_node_inner_lock()
391
*/
392
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
393
static void
394
_binder_node_inner_unlock(struct binder_node *node, int line)
395
__releases(&node->lock) __releases(&node->proc->inner_lock)
396
{
397
struct binder_proc *proc = node->proc;
398
399
binder_debug(BINDER_DEBUG_SPINLOCKS,
400
"%s: line=%d\n", __func__, line);
401
if (proc)
402
binder_inner_proc_unlock(proc);
403
else
404
/* annotation for sparse */
405
__release(&node->proc->inner_lock);
406
spin_unlock(&node->lock);
407
}
408
409
static bool binder_worklist_empty_ilocked(struct list_head *list)
410
{
411
return list_empty(list);
412
}
413
414
/**
415
* binder_worklist_empty() - Check if no items on the work list
416
* @proc: binder_proc associated with list
417
* @list: list to check
418
*
419
* Return: true if there are no items on list, else false
420
*/
421
static bool binder_worklist_empty(struct binder_proc *proc,
422
struct list_head *list)
423
{
424
bool ret;
425
426
binder_inner_proc_lock(proc);
427
ret = binder_worklist_empty_ilocked(list);
428
binder_inner_proc_unlock(proc);
429
return ret;
430
}
431
432
/**
433
* binder_enqueue_work_ilocked() - Add an item to the work list
434
* @work: struct binder_work to add to list
435
* @target_list: list to add work to
436
*
437
* Adds the work to the specified list. Asserts that work
438
* is not already on a list.
439
*
440
* Requires the proc->inner_lock to be held.
441
*/
442
static void
443
binder_enqueue_work_ilocked(struct binder_work *work,
444
struct list_head *target_list)
445
{
446
BUG_ON(target_list == NULL);
447
BUG_ON(work->entry.next && !list_empty(&work->entry));
448
list_add_tail(&work->entry, target_list);
449
}
450
451
/**
452
* binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
453
* @thread: thread to queue work to
454
* @work: struct binder_work to add to list
455
*
456
* Adds the work to the todo list of the thread. Doesn't set the process_todo
457
* flag, which means that (if it wasn't already set) the thread will go to
458
* sleep without handling this work when it calls read.
459
*
460
* Requires the proc->inner_lock to be held.
461
*/
462
static void
463
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
464
struct binder_work *work)
465
{
466
WARN_ON(!list_empty(&thread->waiting_thread_node));
467
binder_enqueue_work_ilocked(work, &thread->todo);
468
}
469
470
/**
471
* binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
472
* @thread: thread to queue work to
473
* @work: struct binder_work to add to list
474
*
475
* Adds the work to the todo list of the thread, and enables processing
476
* of the todo queue.
477
*
478
* Requires the proc->inner_lock to be held.
479
*/
480
static void
481
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
482
struct binder_work *work)
483
{
484
WARN_ON(!list_empty(&thread->waiting_thread_node));
485
binder_enqueue_work_ilocked(work, &thread->todo);
486
487
/* (e)poll-based threads require an explicit wakeup signal when
488
* queuing their own work; they rely on these events to consume
489
* messages without blocking on I/O. Without it, threads risk waiting
490
* indefinitely without handling the work.
491
*/
492
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
493
thread->pid == current->pid && !thread->process_todo)
494
wake_up_interruptible_sync(&thread->wait);
495
496
thread->process_todo = true;
497
}
498
499
/**
500
* binder_enqueue_thread_work() - Add an item to the thread work list
501
* @thread: thread to queue work to
502
* @work: struct binder_work to add to list
503
*
504
* Adds the work to the todo list of the thread, and enables processing
505
* of the todo queue.
506
*/
507
static void
508
binder_enqueue_thread_work(struct binder_thread *thread,
509
struct binder_work *work)
510
{
511
binder_inner_proc_lock(thread->proc);
512
binder_enqueue_thread_work_ilocked(thread, work);
513
binder_inner_proc_unlock(thread->proc);
514
}
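
/*
 * Illustrative sketch (hypothetical caller, with "t" standing in for a
 * binder_transaction): queueing its work item for a specific thread;
 * the helper takes and releases proc->inner_lock itself.
 *
 *	t->work.type = BINDER_WORK_TRANSACTION;
 *	binder_enqueue_thread_work(thread, &t->work);
 */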
515
516
static void
517
binder_dequeue_work_ilocked(struct binder_work *work)
518
{
519
list_del_init(&work->entry);
520
}
521
522
/**
523
* binder_dequeue_work() - Removes an item from the work list
524
* @proc: binder_proc associated with list
525
* @work: struct binder_work to remove from list
526
*
527
* Removes the specified work item from whatever list it is on.
528
* Can safely be called if work is not on any list.
529
*/
530
static void
531
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
532
{
533
binder_inner_proc_lock(proc);
534
binder_dequeue_work_ilocked(work);
535
binder_inner_proc_unlock(proc);
536
}
537
538
static struct binder_work *binder_dequeue_work_head_ilocked(
539
struct list_head *list)
540
{
541
struct binder_work *w;
542
543
w = list_first_entry_or_null(list, struct binder_work, entry);
544
if (w)
545
list_del_init(&w->entry);
546
return w;
547
}
548
549
static void
550
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
551
static void binder_free_thread(struct binder_thread *thread);
552
static void binder_free_proc(struct binder_proc *proc);
553
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
554
555
static bool binder_has_work_ilocked(struct binder_thread *thread,
556
bool do_proc_work)
557
{
558
return thread->process_todo ||
559
thread->looper_need_return ||
560
(do_proc_work &&
561
!binder_worklist_empty_ilocked(&thread->proc->todo));
562
}
563
564
static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
565
{
566
bool has_work;
567
568
binder_inner_proc_lock(thread->proc);
569
has_work = binder_has_work_ilocked(thread, do_proc_work);
570
binder_inner_proc_unlock(thread->proc);
571
572
return has_work;
573
}
574
575
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
576
{
577
return !thread->transaction_stack &&
578
binder_worklist_empty_ilocked(&thread->todo);
579
}
580
581
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
582
bool sync)
583
{
584
struct rb_node *n;
585
struct binder_thread *thread;
586
587
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
588
thread = rb_entry(n, struct binder_thread, rb_node);
589
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
590
binder_available_for_proc_work_ilocked(thread)) {
591
if (sync)
592
wake_up_interruptible_sync(&thread->wait);
593
else
594
wake_up_interruptible(&thread->wait);
595
}
596
}
597
}
598
599
/**
600
* binder_select_thread_ilocked() - selects a thread for doing proc work.
601
* @proc: process to select a thread from
602
*
603
* Note that calling this function moves the thread off the waiting_threads
604
* list, so it can only be woken up by the caller of this function, or a
605
* signal. Therefore, callers *should* always wake up the thread this function
606
* returns.
607
*
608
* Return: If there's a thread currently waiting for process work,
609
* returns that thread. Otherwise returns NULL.
610
*/
611
static struct binder_thread *
612
binder_select_thread_ilocked(struct binder_proc *proc)
613
{
614
struct binder_thread *thread;
615
616
assert_spin_locked(&proc->inner_lock);
617
thread = list_first_entry_or_null(&proc->waiting_threads,
618
struct binder_thread,
619
waiting_thread_node);
620
621
if (thread)
622
list_del_init(&thread->waiting_thread_node);
623
624
return thread;
625
}
626
627
/**
628
* binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
629
* @proc: process to wake up a thread in
630
* @thread: specific thread to wake-up (may be NULL)
631
* @sync: whether to do a synchronous wake-up
632
*
633
* This function wakes up a thread in the @proc process.
634
* The caller may provide a specific thread to wake-up in
635
* the @thread parameter. If @thread is NULL, this function
636
* will wake up threads that have called poll().
637
*
638
* Note that for this function to work as expected, callers
639
* should first call binder_select_thread() to find a thread
640
* to handle the work (if they don't have a thread already),
641
* and pass the result into the @thread parameter.
642
*/
643
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
644
struct binder_thread *thread,
645
bool sync)
646
{
647
assert_spin_locked(&proc->inner_lock);
648
649
if (thread) {
650
if (sync)
651
wake_up_interruptible_sync(&thread->wait);
652
else
653
wake_up_interruptible(&thread->wait);
654
return;
655
}
656
657
/* Didn't find a thread waiting for proc work; this can happen
658
* in two scenarios:
659
* 1. All threads are busy handling transactions
660
* In that case, one of those threads should call back into
661
* the kernel driver soon and pick up this work.
662
* 2. Threads are using the (e)poll interface, in which case
663
* they may be blocked on the waitqueue without having been
664
* added to waiting_threads. For this case, we just iterate
665
* over all threads not handling transaction work, and
666
* wake them all up. We wake all because we don't know whether
667
* a thread that called into (e)poll is handling non-binder
668
* work currently.
669
*/
670
binder_wakeup_poll_threads_ilocked(proc, sync);
671
}
672
673
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
674
{
675
struct binder_thread *thread = binder_select_thread_ilocked(proc);
676
677
binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
678
}
679
680
static void binder_set_nice(long nice)
681
{
682
long min_nice;
683
684
if (can_nice(current, nice)) {
685
set_user_nice(current, nice);
686
return;
687
}
688
min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
689
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
690
"%d: nice value %ld not allowed use %ld instead\n",
691
current->pid, nice, min_nice);
692
set_user_nice(current, min_nice);
693
if (min_nice <= MAX_NICE)
694
return;
695
binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
696
}
697
698
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
699
binder_uintptr_t ptr)
700
{
701
struct rb_node *n = proc->nodes.rb_node;
702
struct binder_node *node;
703
704
assert_spin_locked(&proc->inner_lock);
705
706
while (n) {
707
node = rb_entry(n, struct binder_node, rb_node);
708
709
if (ptr < node->ptr)
710
n = n->rb_left;
711
else if (ptr > node->ptr)
712
n = n->rb_right;
713
else {
714
/*
715
* take an implicit weak reference
716
* to ensure node stays alive until
717
* call to binder_put_node()
718
*/
719
binder_inc_node_tmpref_ilocked(node);
720
return node;
721
}
722
}
723
return NULL;
724
}
725
726
static struct binder_node *binder_get_node(struct binder_proc *proc,
727
binder_uintptr_t ptr)
728
{
729
struct binder_node *node;
730
731
binder_inner_proc_lock(proc);
732
node = binder_get_node_ilocked(proc, ptr);
733
binder_inner_proc_unlock(proc);
734
return node;
735
}
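
/*
 * Illustrative sketch (hypothetical caller, with "fp" standing in for a
 * flat_binder_object): the temporary reference taken by binder_get_node()
 * must be dropped with binder_put_node() (defined further below) once the
 * node pointer is no longer needed.
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (node) {
 *		// ... use node ...
 *		binder_put_node(node);
 *	}
 */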
736
737
static struct binder_node *binder_init_node_ilocked(
738
struct binder_proc *proc,
739
struct binder_node *new_node,
740
struct flat_binder_object *fp)
741
{
742
struct rb_node **p = &proc->nodes.rb_node;
743
struct rb_node *parent = NULL;
744
struct binder_node *node;
745
binder_uintptr_t ptr = fp ? fp->binder : 0;
746
binder_uintptr_t cookie = fp ? fp->cookie : 0;
747
__u32 flags = fp ? fp->flags : 0;
748
749
assert_spin_locked(&proc->inner_lock);
750
751
while (*p) {
752
753
parent = *p;
754
node = rb_entry(parent, struct binder_node, rb_node);
755
756
if (ptr < node->ptr)
757
p = &(*p)->rb_left;
758
else if (ptr > node->ptr)
759
p = &(*p)->rb_right;
760
else {
761
/*
762
* A matching node is already in
763
* the rb tree. Abandon the init
764
* and return it.
765
*/
766
binder_inc_node_tmpref_ilocked(node);
767
return node;
768
}
769
}
770
node = new_node;
771
binder_stats_created(BINDER_STAT_NODE);
772
node->tmp_refs++;
773
rb_link_node(&node->rb_node, parent, p);
774
rb_insert_color(&node->rb_node, &proc->nodes);
775
node->debug_id = atomic_inc_return(&binder_last_id);
776
node->proc = proc;
777
node->ptr = ptr;
778
node->cookie = cookie;
779
node->work.type = BINDER_WORK_NODE;
780
node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
781
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
782
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
783
spin_lock_init(&node->lock);
784
INIT_LIST_HEAD(&node->work.entry);
785
INIT_LIST_HEAD(&node->async_todo);
786
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
787
"%d:%d node %d u%016llx c%016llx created\n",
788
proc->pid, current->pid, node->debug_id,
789
(u64)node->ptr, (u64)node->cookie);
790
791
return node;
792
}
793
794
static struct binder_node *binder_new_node(struct binder_proc *proc,
795
struct flat_binder_object *fp)
796
{
797
struct binder_node *node;
798
struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
799
800
if (!new_node)
801
return NULL;
802
binder_inner_proc_lock(proc);
803
node = binder_init_node_ilocked(proc, new_node, fp);
804
binder_inner_proc_unlock(proc);
805
if (node != new_node)
806
/*
807
* The node was already added by another thread
808
*/
809
kfree(new_node);
810
811
return node;
812
}
813
814
static void binder_free_node(struct binder_node *node)
815
{
816
kfree(node);
817
binder_stats_deleted(BINDER_STAT_NODE);
818
}
819
820
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
821
int internal,
822
struct list_head *target_list)
823
{
824
struct binder_proc *proc = node->proc;
825
826
assert_spin_locked(&node->lock);
827
if (proc)
828
assert_spin_locked(&proc->inner_lock);
829
if (strong) {
830
if (internal) {
831
if (target_list == NULL &&
832
node->internal_strong_refs == 0 &&
833
!(node->proc &&
834
node == node->proc->context->binder_context_mgr_node &&
835
node->has_strong_ref)) {
836
pr_err("invalid inc strong node for %d\n",
837
node->debug_id);
838
return -EINVAL;
839
}
840
node->internal_strong_refs++;
841
} else
842
node->local_strong_refs++;
843
if (!node->has_strong_ref && target_list) {
844
struct binder_thread *thread = container_of(target_list,
845
struct binder_thread, todo);
846
binder_dequeue_work_ilocked(&node->work);
847
BUG_ON(&thread->todo != target_list);
848
binder_enqueue_deferred_thread_work_ilocked(thread,
849
&node->work);
850
}
851
} else {
852
if (!internal)
853
node->local_weak_refs++;
854
if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
855
binder_enqueue_work_ilocked(&node->work, target_list);
856
}
857
return 0;
858
}
859
860
static int binder_inc_node(struct binder_node *node, int strong, int internal,
861
struct list_head *target_list)
862
{
863
int ret;
864
865
binder_node_inner_lock(node);
866
ret = binder_inc_node_nilocked(node, strong, internal, target_list);
867
binder_node_inner_unlock(node);
868
869
return ret;
870
}
871
872
static bool binder_dec_node_nilocked(struct binder_node *node,
873
int strong, int internal)
874
{
875
struct binder_proc *proc = node->proc;
876
877
assert_spin_locked(&node->lock);
878
if (proc)
879
assert_spin_locked(&proc->inner_lock);
880
if (strong) {
881
if (internal)
882
node->internal_strong_refs--;
883
else
884
node->local_strong_refs--;
885
if (node->local_strong_refs || node->internal_strong_refs)
886
return false;
887
} else {
888
if (!internal)
889
node->local_weak_refs--;
890
if (node->local_weak_refs || node->tmp_refs ||
891
!hlist_empty(&node->refs))
892
return false;
893
}
894
895
if (proc && (node->has_strong_ref || node->has_weak_ref)) {
896
if (list_empty(&node->work.entry)) {
897
binder_enqueue_work_ilocked(&node->work, &proc->todo);
898
binder_wakeup_proc_ilocked(proc);
899
}
900
} else {
901
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
902
!node->local_weak_refs && !node->tmp_refs) {
903
if (proc) {
904
binder_dequeue_work_ilocked(&node->work);
905
rb_erase(&node->rb_node, &proc->nodes);
906
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
907
"refless node %d deleted\n",
908
node->debug_id);
909
} else {
910
BUG_ON(!list_empty(&node->work.entry));
911
spin_lock(&binder_dead_nodes_lock);
912
/*
913
* tmp_refs could have changed so
914
* check it again
915
*/
916
if (node->tmp_refs) {
917
spin_unlock(&binder_dead_nodes_lock);
918
return false;
919
}
920
hlist_del(&node->dead_node);
921
spin_unlock(&binder_dead_nodes_lock);
922
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
923
"dead node %d deleted\n",
924
node->debug_id);
925
}
926
return true;
927
}
928
}
929
return false;
930
}
931
932
static void binder_dec_node(struct binder_node *node, int strong, int internal)
933
{
934
bool free_node;
935
936
binder_node_inner_lock(node);
937
free_node = binder_dec_node_nilocked(node, strong, internal);
938
binder_node_inner_unlock(node);
939
if (free_node)
940
binder_free_node(node);
941
}
942
943
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
944
{
945
/*
946
* No call to binder_inc_node() is needed since we
947
* don't need to inform userspace of any changes to
948
* tmp_refs
949
*/
950
node->tmp_refs++;
951
}
952
953
/**
954
* binder_inc_node_tmpref() - take a temporary reference on node
955
* @node: node to reference
956
*
957
* Take reference on node to prevent the node from being freed
958
* while referenced only by a local variable. The inner lock is
959
* needed to serialize with the node work on the queue (which
960
* isn't needed after the node is dead). If the node is dead
961
* (node->proc is NULL), use binder_dead_nodes_lock to protect
962
* node->tmp_refs against dead-node-only cases where the node
963
* lock cannot be acquired (e.g. traversing the dead node list to
964
* print nodes)
965
*/
966
static void binder_inc_node_tmpref(struct binder_node *node)
967
{
968
binder_node_lock(node);
969
if (node->proc)
970
binder_inner_proc_lock(node->proc);
971
else
972
spin_lock(&binder_dead_nodes_lock);
973
binder_inc_node_tmpref_ilocked(node);
974
if (node->proc)
975
binder_inner_proc_unlock(node->proc);
976
else
977
spin_unlock(&binder_dead_nodes_lock);
978
binder_node_unlock(node);
979
}
980
981
/**
982
* binder_dec_node_tmpref() - remove a temporary reference on node
983
* @node: node to reference
984
*
985
* Release temporary reference on node taken via binder_inc_node_tmpref()
986
*/
987
static void binder_dec_node_tmpref(struct binder_node *node)
988
{
989
bool free_node;
990
991
binder_node_inner_lock(node);
992
if (!node->proc)
993
spin_lock(&binder_dead_nodes_lock);
994
else
995
__acquire(&binder_dead_nodes_lock);
996
node->tmp_refs--;
997
BUG_ON(node->tmp_refs < 0);
998
if (!node->proc)
999
spin_unlock(&binder_dead_nodes_lock);
1000
else
1001
__release(&binder_dead_nodes_lock);
1002
/*
1003
* Call binder_dec_node() to check if all refcounts are 0
1004
* and cleanup is needed. Calling with strong=0 and internal=1
1005
* causes no actual reference to be released in binder_dec_node().
1006
* If that changes, a change is needed here too.
1007
*/
1008
free_node = binder_dec_node_nilocked(node, 0, 1);
1009
binder_node_inner_unlock(node);
1010
if (free_node)
1011
binder_free_node(node);
1012
}
1013
1014
static void binder_put_node(struct binder_node *node)
1015
{
1016
binder_dec_node_tmpref(node);
1017
}
1018
1019
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1020
u32 desc, bool need_strong_ref)
1021
{
1022
struct rb_node *n = proc->refs_by_desc.rb_node;
1023
struct binder_ref *ref;
1024
1025
while (n) {
1026
ref = rb_entry(n, struct binder_ref, rb_node_desc);
1027
1028
if (desc < ref->data.desc) {
1029
n = n->rb_left;
1030
} else if (desc > ref->data.desc) {
1031
n = n->rb_right;
1032
} else if (need_strong_ref && !ref->data.strong) {
1033
binder_user_error("tried to use weak ref as strong ref\n");
1034
return NULL;
1035
} else {
1036
return ref;
1037
}
1038
}
1039
return NULL;
1040
}
1041
1042
/* Find the smallest unused descriptor the "slow way" */
1043
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1044
{
1045
struct binder_ref *ref;
1046
struct rb_node *n;
1047
u32 desc;
1048
1049
desc = offset;
1050
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1051
ref = rb_entry(n, struct binder_ref, rb_node_desc);
1052
if (ref->data.desc > desc)
1053
break;
1054
desc = ref->data.desc + 1;
1055
}
1056
1057
return desc;
1058
}
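
/*
 * Illustrative worked example: with descriptors 1, 2 and 5 in use and
 * @offset == 1, the loop advances desc to 2, then 3, stops at the ref
 * with descriptor 5 (5 > 3) and returns 3, the smallest unused value.
 */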
1059
1060
/*
1061
* Find an available reference descriptor ID. The proc->outer_lock might
1062
* be released in the process, in which case -EAGAIN is returned and the
1063
* @desc should be considered invalid.
1064
*/
1065
static int get_ref_desc_olocked(struct binder_proc *proc,
1066
struct binder_node *node,
1067
u32 *desc)
1068
{
1069
struct dbitmap *dmap = &proc->dmap;
1070
unsigned int nbits, offset;
1071
unsigned long *new, bit;
1072
1073
/* 0 is reserved for the context manager */
1074
offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1075
1076
if (!dbitmap_enabled(dmap)) {
1077
*desc = slow_desc_lookup_olocked(proc, offset);
1078
return 0;
1079
}
1080
1081
if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1082
*desc = bit;
1083
return 0;
1084
}
1085
1086
/*
1087
* The dbitmap is full and needs to grow. The proc->outer_lock
1088
* is briefly released to allocate the new bitmap safely.
1089
*/
1090
nbits = dbitmap_grow_nbits(dmap);
1091
binder_proc_unlock(proc);
1092
new = bitmap_zalloc(nbits, GFP_KERNEL);
1093
binder_proc_lock(proc);
1094
dbitmap_grow(dmap, new, nbits);
1095
1096
return -EAGAIN;
1097
}
1098
1099
/**
1100
* binder_get_ref_for_node_olocked() - get the ref associated with given node
1101
* @proc: binder_proc that owns the ref
1102
* @node: binder_node of target
1103
* @new_ref: newly allocated binder_ref to be initialized or %NULL
1104
*
1105
* Look up the ref for the given node and return it if it exists
1106
*
1107
* If it doesn't exist and the caller provides a newly allocated
1108
* ref, initialize the fields of the newly allocated ref and insert
1109
* into the given proc rb_trees and node refs list.
1110
*
1111
* Return: the ref for node. It is possible that another thread
1112
* allocated/initialized the ref first in which case the
1113
* returned ref would be different from the passed-in
1114
* new_ref. new_ref must be kfree'd by the caller in
1115
* this case.
1116
*/
1117
static struct binder_ref *binder_get_ref_for_node_olocked(
1118
struct binder_proc *proc,
1119
struct binder_node *node,
1120
struct binder_ref *new_ref)
1121
{
1122
struct binder_ref *ref;
1123
struct rb_node *parent;
1124
struct rb_node **p;
1125
u32 desc;
1126
1127
retry:
1128
p = &proc->refs_by_node.rb_node;
1129
parent = NULL;
1130
while (*p) {
1131
parent = *p;
1132
ref = rb_entry(parent, struct binder_ref, rb_node_node);
1133
1134
if (node < ref->node)
1135
p = &(*p)->rb_left;
1136
else if (node > ref->node)
1137
p = &(*p)->rb_right;
1138
else
1139
return ref;
1140
}
1141
if (!new_ref)
1142
return NULL;
1143
1144
/* might release the proc->outer_lock */
1145
if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1146
goto retry;
1147
1148
binder_stats_created(BINDER_STAT_REF);
1149
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1150
new_ref->proc = proc;
1151
new_ref->node = node;
1152
rb_link_node(&new_ref->rb_node_node, parent, p);
1153
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1154
1155
new_ref->data.desc = desc;
1156
p = &proc->refs_by_desc.rb_node;
1157
while (*p) {
1158
parent = *p;
1159
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1160
1161
if (new_ref->data.desc < ref->data.desc)
1162
p = &(*p)->rb_left;
1163
else if (new_ref->data.desc > ref->data.desc)
1164
p = &(*p)->rb_right;
1165
else
1166
BUG();
1167
}
1168
rb_link_node(&new_ref->rb_node_desc, parent, p);
1169
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1170
1171
binder_node_lock(node);
1172
hlist_add_head(&new_ref->node_entry, &node->refs);
1173
1174
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1175
"%d new ref %d desc %d for node %d\n",
1176
proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1177
node->debug_id);
1178
binder_node_unlock(node);
1179
return new_ref;
1180
}
1181
1182
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1183
{
1184
struct dbitmap *dmap = &ref->proc->dmap;
1185
bool delete_node = false;
1186
1187
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1188
"%d delete ref %d desc %d for node %d\n",
1189
ref->proc->pid, ref->data.debug_id, ref->data.desc,
1190
ref->node->debug_id);
1191
1192
if (dbitmap_enabled(dmap))
1193
dbitmap_clear_bit(dmap, ref->data.desc);
1194
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1195
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1196
1197
binder_node_inner_lock(ref->node);
1198
if (ref->data.strong)
1199
binder_dec_node_nilocked(ref->node, 1, 1);
1200
1201
hlist_del(&ref->node_entry);
1202
delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1203
binder_node_inner_unlock(ref->node);
1204
/*
1205
* Clear ref->node unless we want the caller to free the node
1206
*/
1207
if (!delete_node) {
1208
/*
1209
* The caller uses ref->node to determine
1210
* whether the node needs to be freed. Clear
1211
* it since the node is still alive.
1212
*/
1213
ref->node = NULL;
1214
}
1215
1216
if (ref->death) {
1217
binder_debug(BINDER_DEBUG_DEAD_BINDER,
1218
"%d delete ref %d desc %d has death notification\n",
1219
ref->proc->pid, ref->data.debug_id,
1220
ref->data.desc);
1221
binder_dequeue_work(ref->proc, &ref->death->work);
1222
binder_stats_deleted(BINDER_STAT_DEATH);
1223
}
1224
1225
if (ref->freeze) {
1226
binder_dequeue_work(ref->proc, &ref->freeze->work);
1227
binder_stats_deleted(BINDER_STAT_FREEZE);
1228
}
1229
1230
binder_stats_deleted(BINDER_STAT_REF);
1231
}
1232
1233
/**
1234
* binder_inc_ref_olocked() - increment the ref for given handle
1235
* @ref: ref to be incremented
1236
* @strong: if true, strong increment, else weak
1237
* @target_list: list to queue node work on
1238
*
1239
* Increment the ref. @ref->proc->outer_lock must be held on entry
1240
*
1241
* Return: 0, if successful, else errno
1242
*/
1243
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1244
struct list_head *target_list)
1245
{
1246
int ret;
1247
1248
if (strong) {
1249
if (ref->data.strong == 0) {
1250
ret = binder_inc_node(ref->node, 1, 1, target_list);
1251
if (ret)
1252
return ret;
1253
}
1254
ref->data.strong++;
1255
} else {
1256
if (ref->data.weak == 0) {
1257
ret = binder_inc_node(ref->node, 0, 1, target_list);
1258
if (ret)
1259
return ret;
1260
}
1261
ref->data.weak++;
1262
}
1263
return 0;
1264
}
1265
1266
/**
1267
* binder_dec_ref_olocked() - dec the ref for given handle
1268
* @ref: ref to be decremented
1269
* @strong: if true, strong decrement, else weak
1270
*
1271
* Decrement the ref.
1272
*
1273
* Return: %true if ref is cleaned up and ready to be freed.
1274
*/
1275
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1276
{
1277
if (strong) {
1278
if (ref->data.strong == 0) {
1279
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1280
ref->proc->pid, ref->data.debug_id,
1281
ref->data.desc, ref->data.strong,
1282
ref->data.weak);
1283
return false;
1284
}
1285
ref->data.strong--;
1286
if (ref->data.strong == 0)
1287
binder_dec_node(ref->node, strong, 1);
1288
} else {
1289
if (ref->data.weak == 0) {
1290
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1291
ref->proc->pid, ref->data.debug_id,
1292
ref->data.desc, ref->data.strong,
1293
ref->data.weak);
1294
return false;
1295
}
1296
ref->data.weak--;
1297
}
1298
if (ref->data.strong == 0 && ref->data.weak == 0) {
1299
binder_cleanup_ref_olocked(ref);
1300
return true;
1301
}
1302
return false;
1303
}
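
/*
 * Illustrative sketch: only the 0 <-> 1 transitions of the strong (or
 * weak) count are forwarded to the underlying node.
 *
 *	binder_inc_ref_olocked(ref, 1, NULL);	// strong 0->1: node informed
 *	binder_inc_ref_olocked(ref, 1, NULL);	// strong 1->2: count only
 *	binder_dec_ref_olocked(ref, 1);		// strong 2->1: count only
 *	binder_dec_ref_olocked(ref, 1);		// strong 1->0: node released
 */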
1304
1305
/**
1306
* binder_get_node_from_ref() - get the node from the given proc/desc
1307
* @proc: proc containing the ref
1308
* @desc: the handle associated with the ref
1309
* @need_strong_ref: if true, only return node if ref is strong
1310
* @rdata: the id/refcount data for the ref
1311
*
1312
* Given a proc and ref handle, return the associated binder_node
1313
*
1314
* Return: a binder_node or NULL if not found or not strong when strong required
1315
*/
1316
static struct binder_node *binder_get_node_from_ref(
1317
struct binder_proc *proc,
1318
u32 desc, bool need_strong_ref,
1319
struct binder_ref_data *rdata)
1320
{
1321
struct binder_node *node;
1322
struct binder_ref *ref;
1323
1324
binder_proc_lock(proc);
1325
ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1326
if (!ref)
1327
goto err_no_ref;
1328
node = ref->node;
1329
/*
1330
* Take an implicit reference on the node to ensure
1331
* it stays alive until the call to binder_put_node()
1332
*/
1333
binder_inc_node_tmpref(node);
1334
if (rdata)
1335
*rdata = ref->data;
1336
binder_proc_unlock(proc);
1337
1338
return node;
1339
1340
err_no_ref:
1341
binder_proc_unlock(proc);
1342
return NULL;
1343
}
1344
1345
/**
1346
* binder_free_ref() - free the binder_ref
1347
* @ref: ref to free
1348
*
1349
* Free the binder_ref. Free the binder_node indicated by ref->node
1350
* (if non-NULL) and the binder_ref_death indicated by ref->death.
1351
*/
1352
static void binder_free_ref(struct binder_ref *ref)
1353
{
1354
if (ref->node)
1355
binder_free_node(ref->node);
1356
kfree(ref->death);
1357
kfree(ref->freeze);
1358
kfree(ref);
1359
}
1360
1361
/* shrink descriptor bitmap if needed */
1362
static void try_shrink_dmap(struct binder_proc *proc)
1363
{
1364
unsigned long *new;
1365
int nbits;
1366
1367
binder_proc_lock(proc);
1368
nbits = dbitmap_shrink_nbits(&proc->dmap);
1369
binder_proc_unlock(proc);
1370
1371
if (!nbits)
1372
return;
1373
1374
new = bitmap_zalloc(nbits, GFP_KERNEL);
1375
binder_proc_lock(proc);
1376
dbitmap_shrink(&proc->dmap, new, nbits);
1377
binder_proc_unlock(proc);
1378
}
1379
1380
/**
1381
* binder_update_ref_for_handle() - inc/dec the ref for given handle
1382
* @proc: proc containing the ref
1383
* @desc: the handle associated with the ref
1384
* @increment: true=inc reference, false=dec reference
1385
* @strong: true=strong reference, false=weak reference
1386
* @rdata: the id/refcount data for the ref
1387
*
1388
* Given a proc and ref handle, increment or decrement the ref
1389
* according to "increment" arg.
1390
*
1391
* Return: 0 if successful, else errno
1392
*/
1393
static int binder_update_ref_for_handle(struct binder_proc *proc,
1394
uint32_t desc, bool increment, bool strong,
1395
struct binder_ref_data *rdata)
1396
{
1397
int ret = 0;
1398
struct binder_ref *ref;
1399
bool delete_ref = false;
1400
1401
binder_proc_lock(proc);
1402
ref = binder_get_ref_olocked(proc, desc, strong);
1403
if (!ref) {
1404
ret = -EINVAL;
1405
goto err_no_ref;
1406
}
1407
if (increment)
1408
ret = binder_inc_ref_olocked(ref, strong, NULL);
1409
else
1410
delete_ref = binder_dec_ref_olocked(ref, strong);
1411
1412
if (rdata)
1413
*rdata = ref->data;
1414
binder_proc_unlock(proc);
1415
1416
if (delete_ref) {
1417
binder_free_ref(ref);
1418
try_shrink_dmap(proc);
1419
}
1420
return ret;
1421
1422
err_no_ref:
1423
binder_proc_unlock(proc);
1424
return ret;
1425
}
1426
1427
/**
1428
* binder_dec_ref_for_handle() - dec the ref for given handle
1429
* @proc: proc containing the ref
1430
* @desc: the handle associated with the ref
1431
* @strong: true=strong reference, false=weak reference
1432
* @rdata: the id/refcount data for the ref
1433
*
1434
* Just calls binder_update_ref_for_handle() to decrement the ref.
1435
*
1436
* Return: 0 if successful, else errno
1437
*/
1438
static int binder_dec_ref_for_handle(struct binder_proc *proc,
1439
uint32_t desc, bool strong, struct binder_ref_data *rdata)
1440
{
1441
return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1442
}
1443
1444
1445
/**
1446
* binder_inc_ref_for_node() - increment the ref for given proc/node
1447
* @proc: proc containing the ref
1448
* @node: target node
1449
* @strong: true=strong reference, false=weak reference
1450
* @target_list: worklist to use if node is incremented
1451
* @rdata: the id/refcount data for the ref
1452
*
1453
* Given a proc and node, increment the ref. Create the ref if it
1454
* doesn't already exist
1455
*
1456
* Return: 0 if successful, else errno
1457
*/
1458
static int binder_inc_ref_for_node(struct binder_proc *proc,
1459
struct binder_node *node,
1460
bool strong,
1461
struct list_head *target_list,
1462
struct binder_ref_data *rdata)
1463
{
1464
struct binder_ref *ref;
1465
struct binder_ref *new_ref = NULL;
1466
int ret = 0;
1467
1468
binder_proc_lock(proc);
1469
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1470
if (!ref) {
1471
binder_proc_unlock(proc);
1472
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1473
if (!new_ref)
1474
return -ENOMEM;
1475
binder_proc_lock(proc);
1476
ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1477
}
1478
ret = binder_inc_ref_olocked(ref, strong, target_list);
1479
*rdata = ref->data;
1480
if (ret && ref == new_ref) {
1481
/*
1482
* Cleanup the failed reference here as the target
1483
* could now be dead and have already released its
1484
* references by now. Calling on the new reference
1485
* with strong=0 and a tmp_refs will not decrement
1486
* the node. The new_ref gets kfree'd below.
1487
*/
1488
binder_cleanup_ref_olocked(new_ref);
1489
ref = NULL;
1490
}
1491
1492
binder_proc_unlock(proc);
1493
if (new_ref && ref != new_ref)
1494
/*
1495
* Another thread created the ref first so
1496
* free the one we allocated
1497
*/
1498
kfree(new_ref);
1499
return ret;
1500
}
1501
1502
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1503
struct binder_transaction *t)
1504
{
1505
BUG_ON(!target_thread);
1506
assert_spin_locked(&target_thread->proc->inner_lock);
1507
BUG_ON(target_thread->transaction_stack != t);
1508
BUG_ON(target_thread->transaction_stack->from != target_thread);
1509
target_thread->transaction_stack =
1510
target_thread->transaction_stack->from_parent;
1511
t->from = NULL;
1512
}
1513
1514
/**
1515
* binder_thread_dec_tmpref() - decrement thread->tmp_ref
1516
* @thread: thread to decrement
1517
*
1518
* A thread needs to be kept alive while being used to create or
1519
* handle a transaction. binder_get_txn_from() is used to safely
1520
* extract t->from from a binder_transaction and keep the thread
1521
* indicated by t->from from being freed. When done with that
1522
* binder_thread, this function is called to decrement the
1523
* tmp_ref and free if appropriate (thread has been released
1524
* and no transaction being processed by the driver)
1525
*/
1526
static void binder_thread_dec_tmpref(struct binder_thread *thread)
1527
{
1528
/*
1529
* atomic is used to protect the counter value while
1530
* it cannot reach zero or thread->is_dead is false
1531
*/
1532
binder_inner_proc_lock(thread->proc);
1533
atomic_dec(&thread->tmp_ref);
1534
if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1535
binder_inner_proc_unlock(thread->proc);
1536
binder_free_thread(thread);
1537
return;
1538
}
1539
binder_inner_proc_unlock(thread->proc);
1540
}
1541
1542
/**
1543
* binder_proc_dec_tmpref() - decrement proc->tmp_ref
1544
* @proc: proc to decrement
1545
*
1546
* A binder_proc needs to be kept alive while being used to create or
1547
* handle a transaction. proc->tmp_ref is incremented when
1548
* creating a new transaction or the binder_proc is currently in-use
1549
* by threads that are being released. When done with the binder_proc,
1550
* this function is called to decrement the counter and free the
1551
* proc if appropriate (proc has been released, all threads have
1552
* been released and not currently in-use to process a transaction).
1553
*/
1554
static void binder_proc_dec_tmpref(struct binder_proc *proc)
1555
{
1556
binder_inner_proc_lock(proc);
1557
proc->tmp_ref--;
1558
if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1559
!proc->tmp_ref) {
1560
binder_inner_proc_unlock(proc);
1561
binder_free_proc(proc);
1562
return;
1563
}
1564
binder_inner_proc_unlock(proc);
1565
}
1566
1567
/**
1568
* binder_get_txn_from() - safely extract the "from" thread in transaction
1569
* @t: binder transaction for t->from
1570
*
1571
* Atomically return the "from" thread and increment the tmp_ref
1572
* count for the thread to ensure it stays alive until
1573
* binder_thread_dec_tmpref() is called.
1574
*
1575
* Return: the value of t->from
1576
*/
1577
static struct binder_thread *binder_get_txn_from(
1578
struct binder_transaction *t)
1579
{
1580
struct binder_thread *from;
1581
1582
guard(spinlock)(&t->lock);
1583
from = t->from;
1584
if (from)
1585
atomic_inc(&from->tmp_ref);
1586
return from;
1587
}
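
/*
 * Illustrative sketch (hypothetical caller): the thread returned here
 * carries an extra tmp_ref and must be released with
 * binder_thread_dec_tmpref() once it is no longer used.
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		// ... use "from" ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */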
1588
1589
/**
1590
* binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1591
* @t: binder transaction for t->from
1592
*
1593
* Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1594
* to guarantee that the thread cannot be released while operating on it.
1595
* The caller must call binder_inner_proc_unlock() to release the inner lock
1596
* as well as call binder_dec_thread_txn() to release the reference.
1597
*
1598
* Return: the value of t->from
1599
*/
1600
static struct binder_thread *binder_get_txn_from_and_acq_inner(
1601
struct binder_transaction *t)
1602
__acquires(&t->from->proc->inner_lock)
1603
{
1604
struct binder_thread *from;
1605
1606
from = binder_get_txn_from(t);
1607
if (!from) {
1608
__acquire(&from->proc->inner_lock);
1609
return NULL;
1610
}
1611
binder_inner_proc_lock(from->proc);
1612
if (t->from) {
1613
BUG_ON(from != t->from);
1614
return from;
1615
}
1616
binder_inner_proc_unlock(from->proc);
1617
__acquire(&from->proc->inner_lock);
1618
binder_thread_dec_tmpref(from);
1619
return NULL;
1620
}
1621
1622
/**
1623
* binder_free_txn_fixups() - free unprocessed fd fixups
1624
* @t: binder transaction for t->from
1625
*
1626
* If the transaction is being torn down prior to being
1627
* processed by the target process, free all of the
1628
* fd fixups and fput the file structs. It is safe to
1629
* call this function after the fixups have been
1630
* processed -- in that case, the list will be empty.
1631
*/
1632
static void binder_free_txn_fixups(struct binder_transaction *t)
1633
{
1634
struct binder_txn_fd_fixup *fixup, *tmp;
1635
1636
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1637
fput(fixup->file);
1638
if (fixup->target_fd >= 0)
1639
put_unused_fd(fixup->target_fd);
1640
list_del(&fixup->fixup_entry);
1641
kfree(fixup);
1642
}
1643
}
1644
1645
static void binder_txn_latency_free(struct binder_transaction *t)
1646
{
1647
int from_proc, from_thread, to_proc, to_thread;
1648
1649
spin_lock(&t->lock);
1650
from_proc = t->from ? t->from->proc->pid : 0;
1651
from_thread = t->from ? t->from->pid : 0;
1652
to_proc = t->to_proc ? t->to_proc->pid : 0;
1653
to_thread = t->to_thread ? t->to_thread->pid : 0;
1654
spin_unlock(&t->lock);
1655
1656
trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1657
}
1658
1659
static void binder_free_transaction(struct binder_transaction *t)
1660
{
1661
struct binder_proc *target_proc = t->to_proc;
1662
1663
if (target_proc) {
1664
binder_inner_proc_lock(target_proc);
1665
target_proc->outstanding_txns--;
1666
if (target_proc->outstanding_txns < 0)
1667
pr_warn("%s: Unexpected outstanding_txns %d\n",
1668
__func__, target_proc->outstanding_txns);
1669
if (!target_proc->outstanding_txns && target_proc->is_frozen)
1670
wake_up_interruptible_all(&target_proc->freeze_wait);
1671
if (t->buffer)
1672
t->buffer->transaction = NULL;
1673
binder_inner_proc_unlock(target_proc);
1674
}
1675
if (trace_binder_txn_latency_free_enabled())
1676
binder_txn_latency_free(t);
1677
/*
1678
* If the transaction has no target_proc, then
1679
* t->buffer->transaction has already been cleared.
1680
*/
1681
binder_free_txn_fixups(t);
1682
kfree(t);
1683
binder_stats_deleted(BINDER_STAT_TRANSACTION);
1684
}
1685
1686
static void binder_send_failed_reply(struct binder_transaction *t,
1687
uint32_t error_code)
1688
{
1689
struct binder_thread *target_thread;
1690
struct binder_transaction *next;
1691
1692
BUG_ON(t->flags & TF_ONE_WAY);
1693
while (1) {
1694
target_thread = binder_get_txn_from_and_acq_inner(t);
1695
if (target_thread) {
1696
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1697
"send failed reply for transaction %d to %d:%d\n",
1698
t->debug_id,
1699
target_thread->proc->pid,
1700
target_thread->pid);
1701
1702
binder_pop_transaction_ilocked(target_thread, t);
1703
if (target_thread->reply_error.cmd == BR_OK) {
1704
target_thread->reply_error.cmd = error_code;
1705
binder_enqueue_thread_work_ilocked(
1706
target_thread,
1707
&target_thread->reply_error.work);
1708
wake_up_interruptible(&target_thread->wait);
1709
} else {
1710
/*
1711
* Cannot get here for normal operation, but
1712
* we can if multiple synchronous transactions
1713
* are sent without blocking for responses.
1714
* Just ignore the 2nd error in this case.
1715
*/
1716
pr_warn("Unexpected reply error: %u\n",
1717
target_thread->reply_error.cmd);
1718
}
1719
binder_inner_proc_unlock(target_thread->proc);
1720
binder_thread_dec_tmpref(target_thread);
1721
binder_free_transaction(t);
1722
return;
1723
}
1724
__release(&target_thread->proc->inner_lock);
1725
next = t->from_parent;
1726
1727
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1728
"send failed reply for transaction %d, target dead\n",
1729
t->debug_id);
1730
1731
binder_free_transaction(t);
1732
if (next == NULL) {
1733
binder_debug(BINDER_DEBUG_DEAD_BINDER,
1734
"reply failed, no target thread at root\n");
1735
return;
1736
}
1737
t = next;
1738
binder_debug(BINDER_DEBUG_DEAD_BINDER,
1739
"reply failed, no target thread -- retry %d\n",
1740
t->debug_id);
1741
}
1742
}
1743
1744
/**
1745
* binder_cleanup_transaction() - cleans up undelivered transaction
1746
* @t: transaction that needs to be cleaned up
1747
* @reason: reason the transaction wasn't delivered
1748
* @error_code: error to return to caller (if synchronous call)
1749
*/
1750
static void binder_cleanup_transaction(struct binder_transaction *t,
1751
const char *reason,
1752
uint32_t error_code)
1753
{
1754
if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1755
binder_send_failed_reply(t, error_code);
1756
} else {
1757
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1758
"undelivered transaction %d, %s\n",
1759
t->debug_id, reason);
1760
binder_free_transaction(t);
1761
}
1762
}
1763
1764
/**
1765
* binder_get_object() - gets object and checks for valid metadata
1766
* @proc: binder_proc owning the buffer
1767
* @u: sender's user pointer to base of buffer
1768
* @buffer: binder_buffer that we're parsing.
1769
* @offset: offset in the @buffer at which to validate an object.
1770
* @object: struct binder_object to read into
1771
*
1772
* Copy the binder object at the given offset into @object. If @u is
1773
* provided then the copy is from the sender's buffer. If not, then
1774
* it is copied from the target's @buffer.
1775
*
1776
* Return: If there's a valid metadata object at @offset, the
1777
* size of that object. Otherwise, it returns zero. The object
1778
* is read into the struct binder_object pointed to by @object.
1779
*/
1780
static size_t binder_get_object(struct binder_proc *proc,
1781
const void __user *u,
1782
struct binder_buffer *buffer,
1783
unsigned long offset,
1784
struct binder_object *object)
1785
{
1786
size_t read_size;
1787
struct binder_object_header *hdr;
1788
size_t object_size = 0;
1789
1790
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1791
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1792
!IS_ALIGNED(offset, sizeof(u32)))
1793
return 0;
1794
1795
if (u) {
1796
if (copy_from_user(object, u + offset, read_size))
1797
return 0;
1798
} else {
1799
if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1800
offset, read_size))
1801
return 0;
1802
}
1803
1804
/* Ok, now see if we read a complete object. */
1805
hdr = &object->hdr;
1806
switch (hdr->type) {
1807
case BINDER_TYPE_BINDER:
1808
case BINDER_TYPE_WEAK_BINDER:
1809
case BINDER_TYPE_HANDLE:
1810
case BINDER_TYPE_WEAK_HANDLE:
1811
object_size = sizeof(struct flat_binder_object);
1812
break;
1813
case BINDER_TYPE_FD:
1814
object_size = sizeof(struct binder_fd_object);
1815
break;
1816
case BINDER_TYPE_PTR:
1817
object_size = sizeof(struct binder_buffer_object);
1818
break;
1819
case BINDER_TYPE_FDA:
1820
object_size = sizeof(struct binder_fd_array_object);
1821
break;
1822
default:
1823
return 0;
1824
}
1825
if (offset <= buffer->data_size - object_size &&
1826
buffer->data_size >= object_size)
1827
return object_size;
1828
else
1829
return 0;
1830
}
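
/*
 * Illustrative sketch (hypothetical caller, with object_offset taken from
 * the transaction's offsets array): reading and size-checking the object
 * that an offset entry points at in the target buffer.
 *
 *	struct binder_object object;
 *	size_t size;
 *
 *	size = binder_get_object(proc, NULL, buffer, object_offset, &object);
 *	if (!size)
 *		return -EINVAL;	// no valid object at that offset
 */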
1831
1832
/**
1833
* binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1834
* @proc: binder_proc owning the buffer
1835
* @b: binder_buffer containing the object
1836
* @object: struct binder_object to read into
1837
* @index: index in offset array at which the binder_buffer_object is
1838
* located
1839
* @start_offset: points to the start of the offset array
1840
* @object_offsetp: offset of @object read from @b
1841
* @num_valid: the number of valid offsets in the offset array
1842
*
1843
* Return: If @index is within the valid range of the offset array
1844
* described by @start_offset and @num_valid, and if there's a valid
1845
* binder_buffer_object at the offset found in index @index
1846
* of the offset array, that object is returned. Otherwise,
1847
* %NULL is returned.
1848
* Note that the offset found in index @index itself is not
1849
* verified; this function assumes that @num_valid elements
1850
* from @start_offset were previously verified to have valid offsets.
1851
* If @object_offsetp is non-NULL, then the offset within
1852
* @b is written to it.
1853
*/
1854
static struct binder_buffer_object *binder_validate_ptr(
1855
struct binder_proc *proc,
1856
struct binder_buffer *b,
1857
struct binder_object *object,
1858
binder_size_t index,
1859
binder_size_t start_offset,
1860
binder_size_t *object_offsetp,
1861
binder_size_t num_valid)
1862
{
1863
size_t object_size;
1864
binder_size_t object_offset;
1865
unsigned long buffer_offset;
1866
1867
if (index >= num_valid)
1868
return NULL;
1869
1870
buffer_offset = start_offset + sizeof(binder_size_t) * index;
1871
if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1872
b, buffer_offset,
1873
sizeof(object_offset)))
1874
return NULL;
1875
object_size = binder_get_object(proc, NULL, b, object_offset, object);
1876
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1877
return NULL;
1878
if (object_offsetp)
1879
*object_offsetp = object_offset;
1880
1881
return &object->bbo;
1882
}
1883
1884
/**
1885
* binder_validate_fixup() - validates pointer/fd fixups happen in order.
1886
* @proc: binder_proc owning the buffer
1887
* @b: transaction buffer
1888
* @objects_start_offset: offset to start of objects buffer
1889
* @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1890
* @fixup_offset: start offset in @b to fix up
1891
* @last_obj_offset: offset to last binder_buffer_object that we fixed
1892
* @last_min_offset: minimum fixup offset in object at @last_obj_offset
1893
*
1894
* Return: %true if a fixup in buffer @b at offset @fixup_offset is
1895
* allowed.
1896
*
1897
* For safety reasons, we only allow fixups inside a buffer to happen
1898
* at increasing offsets; additionally, we only allow fixup on the last
1899
* buffer object that was verified, or one of its parents.
1900
*
1901
* Example of what is allowed:
1902
*
1903
* A
1904
* B (parent = A, offset = 0)
1905
* C (parent = A, offset = 16)
1906
* D (parent = C, offset = 0)
1907
* E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1908
*
1909
* Examples of what is not allowed:
1910
*
1911
* Decreasing offsets within the same parent:
1912
* A
1913
* C (parent = A, offset = 16)
1914
* B (parent = A, offset = 0) // decreasing offset within A
1915
*
1916
* Referring to a parent that wasn't the last object or any of its parents:
1917
* A
1918
* B (parent = A, offset = 0)
1919
* C (parent = A, offset = 0)
1920
* C (parent = A, offset = 16)
1921
* D (parent = B, offset = 0) // B is not A or any of A's parents
1922
*/
1923
static bool binder_validate_fixup(struct binder_proc *proc,
1924
struct binder_buffer *b,
1925
binder_size_t objects_start_offset,
1926
binder_size_t buffer_obj_offset,
1927
binder_size_t fixup_offset,
1928
binder_size_t last_obj_offset,
1929
binder_size_t last_min_offset)
1930
{
1931
if (!last_obj_offset) {
1932
/* No previously-verified buffer object to fix up in */
1933
return false;
1934
}
1935
1936
while (last_obj_offset != buffer_obj_offset) {
1937
unsigned long buffer_offset;
1938
struct binder_object last_object;
1939
struct binder_buffer_object *last_bbo;
1940
size_t object_size = binder_get_object(proc, NULL, b,
1941
last_obj_offset,
1942
&last_object);
1943
if (object_size != sizeof(*last_bbo))
1944
return false;
1945
1946
last_bbo = &last_object.bbo;
1947
/*
1948
* Safe to retrieve the parent of last_obj, since it
1949
* was already previously verified by the driver.
1950
*/
1951
if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1952
return false;
1953
last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1954
buffer_offset = objects_start_offset +
1955
sizeof(binder_size_t) * last_bbo->parent;
1956
if (binder_alloc_copy_from_buffer(&proc->alloc,
1957
&last_obj_offset,
1958
b, buffer_offset,
1959
sizeof(last_obj_offset)))
1960
return false;
1961
}
1962
return (fixup_offset >= last_min_offset);
1963
}
1964
1965
/**
1966
* struct binder_task_work_cb - for deferred close
1967
*
1968
* @twork: callback_head for task work
1969
* @file: file to close
1970
*
1971
* Structure to pass task work to be handled after
1972
* returning from binder_ioctl() via task_work_add().
1973
*/
1974
struct binder_task_work_cb {
1975
struct callback_head twork;
1976
struct file *file;
1977
};
1978
1979
/**
1980
* binder_do_fd_close() - close list of file descriptors
1981
* @twork: callback head for task work
1982
*
1983
* It is not safe to call ksys_close() during the binder_ioctl()
1984
* function if there is a chance that binder's own file descriptor
1985
* might be closed. This is to meet the requirements for using
1986
* fdget() (see comments for __fget_light()). Therefore use
1987
* task_work_add() to schedule the close operation once we have
1988
* returned from binder_ioctl(). This function is a callback
1989
* for that mechanism and does the actual ksys_close() on the
1990
* given file descriptor.
1991
*/
1992
static void binder_do_fd_close(struct callback_head *twork)
1993
{
1994
struct binder_task_work_cb *twcb = container_of(twork,
1995
struct binder_task_work_cb, twork);
1996
1997
fput(twcb->file);
1998
kfree(twcb);
1999
}
2000
2001
/**
2002
* binder_deferred_fd_close() - schedule a close for the given file-descriptor
2003
* @fd: file-descriptor to close
2004
*
2005
* See comments in binder_do_fd_close(). This function is used to schedule
2006
* a file-descriptor to be closed after returning from binder_ioctl().
2007
*/
2008
static void binder_deferred_fd_close(int fd)
2009
{
2010
struct binder_task_work_cb *twcb;
2011
2012
twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2013
if (!twcb)
2014
return;
2015
init_task_work(&twcb->twork, binder_do_fd_close);
2016
twcb->file = file_close_fd(fd);
2017
if (twcb->file) {
2018
// pin it until binder_do_fd_close(); see comments there
2019
get_file(twcb->file);
2020
filp_close(twcb->file, current->files);
2021
task_work_add(current, &twcb->twork, TWA_RESUME);
2022
} else {
2023
kfree(twcb);
2024
}
2025
}
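/*
 * Illustrative note on the sequence above: the fd table entry is removed
 * synchronously (file_close_fd() followed by filp_close()), so the
 * descriptor number can be reused immediately, while the final fput() of
 * the pinned struct file is deferred via task_work_add() and runs in
 * binder_do_fd_close() only after the current binder_ioctl() has returned
 * to userspace.
 */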
2026
2027
static void binder_transaction_buffer_release(struct binder_proc *proc,
2028
struct binder_thread *thread,
2029
struct binder_buffer *buffer,
2030
binder_size_t off_end_offset,
2031
bool is_failure)
2032
{
2033
int debug_id = buffer->debug_id;
2034
binder_size_t off_start_offset, buffer_offset;
2035
2036
binder_debug(BINDER_DEBUG_TRANSACTION,
2037
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
2038
proc->pid, buffer->debug_id,
2039
buffer->data_size, buffer->offsets_size,
2040
(unsigned long long)off_end_offset);
2041
2042
if (buffer->target_node)
2043
binder_dec_node(buffer->target_node, 1, 0);
2044
2045
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2046
2047
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2048
buffer_offset += sizeof(binder_size_t)) {
2049
struct binder_object_header *hdr;
2050
size_t object_size = 0;
2051
struct binder_object object;
2052
binder_size_t object_offset;
2053
2054
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2055
buffer, buffer_offset,
2056
sizeof(object_offset)))
2057
object_size = binder_get_object(proc, NULL, buffer,
2058
object_offset, &object);
2059
if (object_size == 0) {
2060
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2061
debug_id, (u64)object_offset, buffer->data_size);
2062
continue;
2063
}
2064
hdr = &object.hdr;
2065
switch (hdr->type) {
2066
case BINDER_TYPE_BINDER:
2067
case BINDER_TYPE_WEAK_BINDER: {
2068
struct flat_binder_object *fp;
2069
struct binder_node *node;
2070
2071
fp = to_flat_binder_object(hdr);
2072
node = binder_get_node(proc, fp->binder);
2073
if (node == NULL) {
2074
pr_err("transaction release %d bad node %016llx\n",
2075
debug_id, (u64)fp->binder);
2076
break;
2077
}
2078
binder_debug(BINDER_DEBUG_TRANSACTION,
2079
" node %d u%016llx\n",
2080
node->debug_id, (u64)node->ptr);
2081
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2082
0);
2083
binder_put_node(node);
2084
} break;
2085
case BINDER_TYPE_HANDLE:
2086
case BINDER_TYPE_WEAK_HANDLE: {
2087
struct flat_binder_object *fp;
2088
struct binder_ref_data rdata;
2089
int ret;
2090
2091
fp = to_flat_binder_object(hdr);
2092
ret = binder_dec_ref_for_handle(proc, fp->handle,
2093
hdr->type == BINDER_TYPE_HANDLE, &rdata);
2094
2095
if (ret) {
2096
pr_err("transaction release %d bad handle %d, ret = %d\n",
2097
debug_id, fp->handle, ret);
2098
break;
2099
}
2100
binder_debug(BINDER_DEBUG_TRANSACTION,
2101
" ref %d desc %d\n",
2102
rdata.debug_id, rdata.desc);
2103
} break;
2104
2105
case BINDER_TYPE_FD: {
2106
/*
2107
* No need to close the file here since user-space
2108
* closes it for successfully delivered
2109
* transactions. For transactions that weren't
2110
* delivered, the new fd was never allocated so
2111
* there is no need to close and the fput on the
2112
* file is done when the transaction is torn
2113
* down.
2114
*/
2115
} break;
2116
case BINDER_TYPE_PTR:
2117
/*
2118
* Nothing to do here, this will get cleaned up when the
2119
* transaction buffer gets freed
2120
*/
2121
break;
2122
case BINDER_TYPE_FDA: {
2123
struct binder_fd_array_object *fda;
2124
struct binder_buffer_object *parent;
2125
struct binder_object ptr_object;
2126
binder_size_t fda_offset;
2127
size_t fd_index;
2128
binder_size_t fd_buf_size;
2129
binder_size_t num_valid;
2130
2131
if (is_failure) {
2132
/*
2133
* The fd fixups have not been applied so no
2134
* fds need to be closed.
2135
*/
2136
continue;
2137
}
2138
2139
num_valid = (buffer_offset - off_start_offset) /
2140
sizeof(binder_size_t);
2141
fda = to_binder_fd_array_object(hdr);
2142
parent = binder_validate_ptr(proc, buffer, &ptr_object,
2143
fda->parent,
2144
off_start_offset,
2145
NULL,
2146
num_valid);
2147
if (!parent) {
2148
pr_err("transaction release %d bad parent offset\n",
2149
debug_id);
2150
continue;
2151
}
2152
fd_buf_size = sizeof(u32) * fda->num_fds;
2153
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2154
pr_err("transaction release %d invalid number of fds (%lld)\n",
2155
debug_id, (u64)fda->num_fds);
2156
continue;
2157
}
2158
if (fd_buf_size > parent->length ||
2159
fda->parent_offset > parent->length - fd_buf_size) {
2160
/* No space for all file descriptors here. */
2161
pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2162
debug_id, (u64)fda->num_fds);
2163
continue;
2164
}
2165
/*
2166
* the source data for binder_buffer_object is visible
2167
* to user-space and the @buffer element is the user
2168
* pointer to the buffer_object containing the fd_array.
2169
* Convert the address to an offset relative to
2170
* the base of the transaction buffer.
2171
*/
2172
fda_offset = parent->buffer - buffer->user_data +
2173
fda->parent_offset;
2174
for (fd_index = 0; fd_index < fda->num_fds;
2175
fd_index++) {
2176
u32 fd;
2177
int err;
2178
binder_size_t offset = fda_offset +
2179
fd_index * sizeof(fd);
2180
2181
err = binder_alloc_copy_from_buffer(
2182
&proc->alloc, &fd, buffer,
2183
offset, sizeof(fd));
2184
WARN_ON(err);
2185
if (!err) {
2186
binder_deferred_fd_close(fd);
2187
/*
2188
* Need to make sure the thread goes
2189
* back to userspace to complete the
2190
* deferred close
2191
*/
2192
if (thread)
2193
thread->looper_need_return = true;
2194
}
2195
}
2196
} break;
2197
default:
2198
pr_err("transaction release %d bad object type %x\n",
2199
debug_id, hdr->type);
2200
break;
2201
}
2202
}
2203
}
2204
2205
/* Clean up all the objects in the buffer */
2206
static inline void binder_release_entire_buffer(struct binder_proc *proc,
2207
struct binder_thread *thread,
2208
struct binder_buffer *buffer,
2209
bool is_failure)
2210
{
2211
binder_size_t off_end_offset;
2212
2213
off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2214
off_end_offset += buffer->offsets_size;
2215
2216
binder_transaction_buffer_release(proc, thread, buffer,
2217
off_end_offset, is_failure);
2218
}
2219
2220
static int binder_translate_binder(struct flat_binder_object *fp,
2221
struct binder_transaction *t,
2222
struct binder_thread *thread)
2223
{
2224
struct binder_node *node;
2225
struct binder_proc *proc = thread->proc;
2226
struct binder_proc *target_proc = t->to_proc;
2227
struct binder_ref_data rdata;
2228
int ret = 0;
2229
2230
node = binder_get_node(proc, fp->binder);
2231
if (!node) {
2232
node = binder_new_node(proc, fp);
2233
if (!node)
2234
return -ENOMEM;
2235
}
2236
if (fp->cookie != node->cookie) {
2237
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2238
proc->pid, thread->pid, (u64)fp->binder,
2239
node->debug_id, (u64)fp->cookie,
2240
(u64)node->cookie);
2241
ret = -EINVAL;
2242
goto done;
2243
}
2244
if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2245
ret = -EPERM;
2246
goto done;
2247
}
2248
2249
ret = binder_inc_ref_for_node(target_proc, node,
2250
fp->hdr.type == BINDER_TYPE_BINDER,
2251
&thread->todo, &rdata);
2252
if (ret)
2253
goto done;
2254
2255
if (fp->hdr.type == BINDER_TYPE_BINDER)
2256
fp->hdr.type = BINDER_TYPE_HANDLE;
2257
else
2258
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2259
fp->binder = 0;
2260
fp->handle = rdata.desc;
2261
fp->cookie = 0;
2262
2263
trace_binder_transaction_node_to_ref(t, node, &rdata);
2264
binder_debug(BINDER_DEBUG_TRANSACTION,
2265
" node %d u%016llx -> ref %d desc %d\n",
2266
node->debug_id, (u64)node->ptr,
2267
rdata.debug_id, rdata.desc);
2268
done:
2269
binder_put_node(node);
2270
return ret;
2271
}
2272
2273
static int binder_translate_handle(struct flat_binder_object *fp,
2274
struct binder_transaction *t,
2275
struct binder_thread *thread)
2276
{
2277
struct binder_proc *proc = thread->proc;
2278
struct binder_proc *target_proc = t->to_proc;
2279
struct binder_node *node;
2280
struct binder_ref_data src_rdata;
2281
int ret = 0;
2282
2283
node = binder_get_node_from_ref(proc, fp->handle,
2284
fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2285
if (!node) {
2286
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2287
proc->pid, thread->pid, fp->handle);
2288
return -EINVAL;
2289
}
2290
if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2291
ret = -EPERM;
2292
goto done;
2293
}
2294
2295
binder_node_lock(node);
2296
if (node->proc == target_proc) {
2297
if (fp->hdr.type == BINDER_TYPE_HANDLE)
2298
fp->hdr.type = BINDER_TYPE_BINDER;
2299
else
2300
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2301
fp->binder = node->ptr;
2302
fp->cookie = node->cookie;
2303
if (node->proc)
2304
binder_inner_proc_lock(node->proc);
2305
else
2306
__acquire(&node->proc->inner_lock);
2307
binder_inc_node_nilocked(node,
2308
fp->hdr.type == BINDER_TYPE_BINDER,
2309
0, NULL);
2310
if (node->proc)
2311
binder_inner_proc_unlock(node->proc);
2312
else
2313
__release(&node->proc->inner_lock);
2314
trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2315
binder_debug(BINDER_DEBUG_TRANSACTION,
2316
" ref %d desc %d -> node %d u%016llx\n",
2317
src_rdata.debug_id, src_rdata.desc, node->debug_id,
2318
(u64)node->ptr);
2319
binder_node_unlock(node);
2320
} else {
2321
struct binder_ref_data dest_rdata;
2322
2323
binder_node_unlock(node);
2324
ret = binder_inc_ref_for_node(target_proc, node,
2325
fp->hdr.type == BINDER_TYPE_HANDLE,
2326
NULL, &dest_rdata);
2327
if (ret)
2328
goto done;
2329
2330
fp->binder = 0;
2331
fp->handle = dest_rdata.desc;
2332
fp->cookie = 0;
2333
trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2334
&dest_rdata);
2335
binder_debug(BINDER_DEBUG_TRANSACTION,
2336
" ref %d desc %d -> ref %d desc %d (node %d)\n",
2337
src_rdata.debug_id, src_rdata.desc,
2338
dest_rdata.debug_id, dest_rdata.desc,
2339
node->debug_id);
2340
}
2341
done:
2342
binder_put_node(node);
2343
return ret;
2344
}
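/*
 * Illustrative round trip for the two translations above: when a process
 * sends one of its own objects as BINDER_TYPE_BINDER,
 * binder_translate_binder() finds or creates the binder_node, takes a
 * reference on behalf of the target and rewrites the object as
 * BINDER_TYPE_HANDLE with a descriptor valid in the target process. If
 * that handle is later sent back toward the owning process,
 * binder_translate_handle() sees node->proc == target_proc and converts
 * it back to BINDER_TYPE_BINDER with the original ptr/cookie; for any
 * other target it mints a new handle in that process instead.
 */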
2345
2346
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2347
struct binder_transaction *t,
2348
struct binder_thread *thread,
2349
struct binder_transaction *in_reply_to)
2350
{
2351
struct binder_proc *proc = thread->proc;
2352
struct binder_proc *target_proc = t->to_proc;
2353
struct binder_txn_fd_fixup *fixup;
2354
struct file *file;
2355
int ret = 0;
2356
bool target_allows_fd;
2357
2358
if (in_reply_to)
2359
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2360
else
2361
target_allows_fd = t->buffer->target_node->accept_fds;
2362
if (!target_allows_fd) {
2363
binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2364
proc->pid, thread->pid,
2365
in_reply_to ? "reply" : "transaction",
2366
fd);
2367
ret = -EPERM;
2368
goto err_fd_not_accepted;
2369
}
2370
2371
file = fget(fd);
2372
if (!file) {
2373
binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2374
proc->pid, thread->pid, fd);
2375
ret = -EBADF;
2376
goto err_fget;
2377
}
2378
ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2379
if (ret < 0) {
2380
ret = -EPERM;
2381
goto err_security;
2382
}
2383
2384
/*
2385
* Add fixup record for this transaction. The allocation
2386
* of the fd in the target needs to be done from a
2387
* target thread.
2388
*/
2389
fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2390
if (!fixup) {
2391
ret = -ENOMEM;
2392
goto err_alloc;
2393
}
2394
fixup->file = file;
2395
fixup->offset = fd_offset;
2396
fixup->target_fd = -1;
2397
trace_binder_transaction_fd_send(t, fd, fixup->offset);
2398
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2399
2400
return ret;
2401
2402
err_alloc:
2403
err_security:
2404
fput(file);
2405
err_fget:
2406
err_fd_not_accepted:
2407
return ret;
2408
}
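/*
 * Note on the fd path above: no file descriptor is installed at this
 * point. The sender's file is looked up and pinned with fget(), and a
 * binder_txn_fd_fixup with target_fd == -1 is queued on t->fd_fixups;
 * the receiving process allocates the actual fd number later, from one
 * of its own threads, when it picks up the transaction.
 */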
2409
2410
/**
2411
* struct binder_ptr_fixup - data to be fixed-up in target buffer
2412
* @offset: offset in target buffer to fixup
2413
* @skip_size: bytes to skip in copy (fixup will be written later)
2414
* @fixup_data: data to write at fixup offset
2415
* @node: list node
2416
*
2417
* This is used for the pointer fixup list (pf) which is created and consumed
2418
* during binder_transaction() and is only accessed locally. No
2419
* locking is necessary.
2420
*
2421
* The list is ordered by @offset.
2422
*/
2423
struct binder_ptr_fixup {
2424
binder_size_t offset;
2425
size_t skip_size;
2426
binder_uintptr_t fixup_data;
2427
struct list_head node;
2428
};
2429
2430
/**
2431
* struct binder_sg_copy - scatter-gather data to be copied
2432
* @offset: offset in target buffer
2433
* @sender_uaddr: user address in source buffer
2434
* @length: bytes to copy
2435
* @node: list node
2436
*
2437
* This is used for the sg copy list (sgc) which is created and consumed
2438
* during binder_transaction() and is only accessed locally. No
2439
* locking is necessary.
2440
*
2441
* The list is ordered by @offset.
2442
*/
2443
struct binder_sg_copy {
2444
binder_size_t offset;
2445
const void __user *sender_uaddr;
2446
size_t length;
2447
struct list_head node;
2448
};
2449
2450
/**
2451
* binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2452
* @alloc: binder_alloc associated with @buffer
2453
* @buffer: binder buffer in target process
2454
* @sgc_head: list_head of scatter-gather copy list
2455
* @pf_head: list_head of pointer fixup list
2456
*
2457
* Processes all elements of @sgc_head, applying fixups from @pf_head
2458
* and copying the scatter-gather data from the source process' user
2459
* buffer to the target's buffer. It is expected that the list creation
2460
* and processing all occurs during binder_transaction() so these lists
2461
* are only accessed in local context.
2462
*
2463
* Return: 0=success, else -errno
2464
*/
2465
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2466
struct binder_buffer *buffer,
2467
struct list_head *sgc_head,
2468
struct list_head *pf_head)
2469
{
2470
int ret = 0;
2471
struct binder_sg_copy *sgc, *tmpsgc;
2472
struct binder_ptr_fixup *tmppf;
2473
struct binder_ptr_fixup *pf =
2474
list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2475
node);
2476
2477
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2478
size_t bytes_copied = 0;
2479
2480
while (bytes_copied < sgc->length) {
2481
size_t copy_size;
2482
size_t bytes_left = sgc->length - bytes_copied;
2483
size_t offset = sgc->offset + bytes_copied;
2484
2485
/*
2486
* We copy up to the fixup (pointed to by pf)
2487
*/
2488
copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2489
: bytes_left;
2490
if (!ret && copy_size)
2491
ret = binder_alloc_copy_user_to_buffer(
2492
alloc, buffer,
2493
offset,
2494
sgc->sender_uaddr + bytes_copied,
2495
copy_size);
2496
bytes_copied += copy_size;
2497
if (copy_size != bytes_left) {
2498
BUG_ON(!pf);
2499
/* we stopped at a fixup offset */
2500
if (pf->skip_size) {
2501
/*
2502
* we are just skipping. This is for
2503
* BINDER_TYPE_FDA where the translated
2504
* fds will be fixed up when we get
2505
* to target context.
2506
*/
2507
bytes_copied += pf->skip_size;
2508
} else {
2509
/* apply the fixup indicated by pf */
2510
if (!ret)
2511
ret = binder_alloc_copy_to_buffer(
2512
alloc, buffer,
2513
pf->offset,
2514
&pf->fixup_data,
2515
sizeof(pf->fixup_data));
2516
bytes_copied += sizeof(pf->fixup_data);
2517
}
2518
list_del(&pf->node);
2519
kfree(pf);
2520
pf = list_first_entry_or_null(pf_head,
2521
struct binder_ptr_fixup, node);
2522
}
2523
}
2524
list_del(&sgc->node);
2525
kfree(sgc);
2526
}
2527
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2528
BUG_ON(pf->skip_size == 0);
2529
list_del(&pf->node);
2530
kfree(pf);
2531
}
2532
BUG_ON(!list_empty(sgc_head));
2533
2534
return ret > 0 ? -EINVAL : ret;
2535
}
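/*
 * Worked example (illustrative): with one sg entry covering offsets
 * 0x40-0x6f of the target buffer and a single pointer fixup at
 * pf->offset == 0x50 with skip_size == 0, the loop above copies sender
 * bytes into 0x40-0x4f, writes the 8-byte translated pointer from
 * pf->fixup_data at 0x50 instead of the raw sender data, and resumes
 * copying sender bytes at 0x58. A fixup with skip_size != 0
 * (BINDER_TYPE_FDA) only skips that range; the fd values are patched in
 * later from the target process context.
 */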
2536
2537
/**
2538
* binder_cleanup_deferred_txn_lists() - free specified lists
2539
* @sgc_head: list_head of scatter-gather copy list
2540
* @pf_head: list_head of pointer fixup list
2541
*
2542
* Called to clean up @sgc_head and @pf_head if there is an
2543
* error.
2544
*/
2545
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2546
struct list_head *pf_head)
2547
{
2548
struct binder_sg_copy *sgc, *tmpsgc;
2549
struct binder_ptr_fixup *pf, *tmppf;
2550
2551
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2552
list_del(&sgc->node);
2553
kfree(sgc);
2554
}
2555
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2556
list_del(&pf->node);
2557
kfree(pf);
2558
}
2559
}
2560
2561
/**
2562
* binder_defer_copy() - queue a scatter-gather buffer for copy
2563
* @sgc_head: list_head of scatter-gather copy list
2564
* @offset: binder buffer offset in target process
2565
* @sender_uaddr: user address in source process
2566
* @length: bytes to copy
2567
*
2568
* Specify a scatter-gather block to be copied. The actual copy must
2569
* be deferred until all the needed fixups are identified and queued.
2570
* Then the copy and fixups are done together so un-translated values
2571
* from the source are never visible in the target buffer.
2572
*
2573
* We are guaranteed that repeated calls to this function will have
2574
* monotonically increasing @offset values so the list will naturally
2575
* be ordered.
2576
*
2577
* Return: 0=success, else -errno
2578
*/
2579
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2580
const void __user *sender_uaddr, size_t length)
2581
{
2582
struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2583
2584
if (!bc)
2585
return -ENOMEM;
2586
2587
bc->offset = offset;
2588
bc->sender_uaddr = sender_uaddr;
2589
bc->length = length;
2590
INIT_LIST_HEAD(&bc->node);
2591
2592
/*
2593
* We are guaranteed that the deferred copies are in-order
2594
* so just add to the tail.
2595
*/
2596
list_add_tail(&bc->node, sgc_head);
2597
2598
return 0;
2599
}
2600
2601
/**
2602
* binder_add_fixup() - queue a fixup to be applied to sg copy
2603
* @pf_head: list_head of binder ptr fixup list
2604
* @offset: binder buffer offset in target process
2605
* @fixup: bytes to be copied for fixup
2606
* @skip_size: bytes to skip when copying (fixup will be applied later)
2607
*
2608
* Add the specified fixup to a list ordered by @offset. When copying
2609
* the scatter-gather buffers, the fixup will be copied instead of
2610
* data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2611
* will be applied later (in target process context), so we just skip
2612
* the bytes specified by @skip_size. If @skip_size is 0, we copy the
2613
* value in @fixup.
2614
*
2615
* This function is called *mostly* in @offset order, but there are
2616
* exceptions. Since out-of-order inserts are relatively uncommon,
2617
* we insert the new element by searching backward from the tail of
2618
* the list.
2619
*
2620
* Return: 0=success, else -errno
2621
*/
2622
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2623
binder_uintptr_t fixup, size_t skip_size)
2624
{
2625
struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2626
struct binder_ptr_fixup *tmppf;
2627
2628
if (!pf)
2629
return -ENOMEM;
2630
2631
pf->offset = offset;
2632
pf->fixup_data = fixup;
2633
pf->skip_size = skip_size;
2634
INIT_LIST_HEAD(&pf->node);
2635
2636
/* Fixups are *mostly* added in-order, but there are some
2637
* exceptions. Look backwards through list for insertion point.
2638
*/
2639
list_for_each_entry_reverse(tmppf, pf_head, node) {
2640
if (tmppf->offset < pf->offset) {
2641
list_add(&pf->node, &tmppf->node);
2642
return 0;
2643
}
2644
}
2645
/*
2646
* if we get here, then the new offset is the lowest so
2647
* insert at the head
2648
*/
2649
list_add(&pf->node, pf_head);
2650
return 0;
2651
}
2652
2653
static int binder_translate_fd_array(struct list_head *pf_head,
2654
struct binder_fd_array_object *fda,
2655
const void __user *sender_ubuffer,
2656
struct binder_buffer_object *parent,
2657
struct binder_buffer_object *sender_uparent,
2658
struct binder_transaction *t,
2659
struct binder_thread *thread,
2660
struct binder_transaction *in_reply_to)
2661
{
2662
binder_size_t fdi, fd_buf_size;
2663
binder_size_t fda_offset;
2664
const void __user *sender_ufda_base;
2665
struct binder_proc *proc = thread->proc;
2666
int ret;
2667
2668
if (fda->num_fds == 0)
2669
return 0;
2670
2671
fd_buf_size = sizeof(u32) * fda->num_fds;
2672
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2673
binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2674
proc->pid, thread->pid, (u64)fda->num_fds);
2675
return -EINVAL;
2676
}
2677
if (fd_buf_size > parent->length ||
2678
fda->parent_offset > parent->length - fd_buf_size) {
2679
/* No space for all file descriptors here. */
2680
binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2681
proc->pid, thread->pid, (u64)fda->num_fds);
2682
return -EINVAL;
2683
}
2684
/*
2685
* the source data for binder_buffer_object is visible
2686
* to user-space and the @buffer element is the user
2687
* pointer to the buffer_object containing the fd_array.
2688
* Convert the address to an offset relative to
2689
* the base of the transaction buffer.
2690
*/
2691
fda_offset = parent->buffer - t->buffer->user_data +
2692
fda->parent_offset;
2693
sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2694
fda->parent_offset;
2695
2696
if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2697
!IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2698
binder_user_error("%d:%d parent offset not aligned correctly.\n",
2699
proc->pid, thread->pid);
2700
return -EINVAL;
2701
}
2702
ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2703
if (ret)
2704
return ret;
2705
2706
for (fdi = 0; fdi < fda->num_fds; fdi++) {
2707
u32 fd;
2708
binder_size_t offset = fda_offset + fdi * sizeof(fd);
2709
binder_size_t sender_uoffset = fdi * sizeof(fd);
2710
2711
ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2712
if (!ret)
2713
ret = binder_translate_fd(fd, offset, t, thread,
2714
in_reply_to);
2715
if (ret)
2716
return ret > 0 ? -EINVAL : ret;
2717
}
2718
return 0;
2719
}
2720
2721
static int binder_fixup_parent(struct list_head *pf_head,
2722
struct binder_transaction *t,
2723
struct binder_thread *thread,
2724
struct binder_buffer_object *bp,
2725
binder_size_t off_start_offset,
2726
binder_size_t num_valid,
2727
binder_size_t last_fixup_obj_off,
2728
binder_size_t last_fixup_min_off)
2729
{
2730
struct binder_buffer_object *parent;
2731
struct binder_buffer *b = t->buffer;
2732
struct binder_proc *proc = thread->proc;
2733
struct binder_proc *target_proc = t->to_proc;
2734
struct binder_object object;
2735
binder_size_t buffer_offset;
2736
binder_size_t parent_offset;
2737
2738
if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2739
return 0;
2740
2741
parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2742
off_start_offset, &parent_offset,
2743
num_valid);
2744
if (!parent) {
2745
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2746
proc->pid, thread->pid);
2747
return -EINVAL;
2748
}
2749
2750
if (!binder_validate_fixup(target_proc, b, off_start_offset,
2751
parent_offset, bp->parent_offset,
2752
last_fixup_obj_off,
2753
last_fixup_min_off)) {
2754
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2755
proc->pid, thread->pid);
2756
return -EINVAL;
2757
}
2758
2759
if (parent->length < sizeof(binder_uintptr_t) ||
2760
bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2761
/* No space for a pointer here! */
2762
binder_user_error("%d:%d got transaction with invalid parent offset\n",
2763
proc->pid, thread->pid);
2764
return -EINVAL;
2765
}
2766
2767
buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2768
2769
return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2770
}
2771
2772
/**
2773
* binder_can_update_transaction() - Can a txn be superseded by an updated one?
2774
* @t1: the pending async txn in the frozen process
2775
* @t2: the new async txn to supersede the outdated pending one
2776
*
2777
* Return: true if t2 can supersede t1
2778
* false if t2 can not supersede t1
2779
*/
2780
static bool binder_can_update_transaction(struct binder_transaction *t1,
2781
struct binder_transaction *t2)
2782
{
2783
if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2784
(TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2785
return false;
2786
if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2787
t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2788
t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2789
t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2790
return true;
2791
return false;
2792
}
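/*
 * Example (illustrative): a frozen process may already hold a pending
 * one-way transaction carrying TF_UPDATE_TXN (e.g. a cached state
 * update). If a newer one-way transaction arrives for the same node with
 * the same code and flags, the checks above allow it to supersede the
 * stale entry on node->async_todo, so only the latest update is delivered
 * once the process thaws.
 */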
2793
2794
/**
2795
* binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2796
* @t: new async transaction
2797
* @target_list: list to find outdated transaction
2798
*
2799
* Return: the outdated transaction if found
2800
* NULL if no outdated transaction can be found
2801
*
2802
* Requires the proc->inner_lock to be held.
2803
*/
2804
static struct binder_transaction *
2805
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2806
struct list_head *target_list)
2807
{
2808
struct binder_work *w;
2809
2810
list_for_each_entry(w, target_list, entry) {
2811
struct binder_transaction *t_queued;
2812
2813
if (w->type != BINDER_WORK_TRANSACTION)
2814
continue;
2815
t_queued = container_of(w, struct binder_transaction, work);
2816
if (binder_can_update_transaction(t_queued, t))
2817
return t_queued;
2818
}
2819
return NULL;
2820
}
2821
2822
/**
2823
* binder_proc_transaction() - sends a transaction to a process and wakes it up
2824
* @t: transaction to send
2825
* @proc: process to send the transaction to
2826
* @thread: thread in @proc to send the transaction to (may be NULL)
2827
*
2828
* This function queues a transaction to the specified process. It will try
2829
* to find a thread in the target process to handle the transaction and
2830
* wake it up. If no thread is found, the work is queued to the proc
2831
* waitqueue.
2832
*
2833
* If the @thread parameter is not NULL, the transaction is always queued
2834
* to the waitlist of that specific thread.
2835
*
2836
* Return: 0 if the transaction was successfully queued
2837
* BR_DEAD_REPLY if the target process or thread is dead
2838
* BR_FROZEN_REPLY if the target process or thread is frozen and
2839
* the sync transaction was rejected
2840
* BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2841
* and the async transaction was successfully queued
2842
*/
2843
static int binder_proc_transaction(struct binder_transaction *t,
2844
struct binder_proc *proc,
2845
struct binder_thread *thread)
2846
{
2847
struct binder_node *node = t->buffer->target_node;
2848
bool oneway = !!(t->flags & TF_ONE_WAY);
2849
bool pending_async = false;
2850
struct binder_transaction *t_outdated = NULL;
2851
bool frozen = false;
2852
2853
BUG_ON(!node);
2854
binder_node_lock(node);
2855
if (oneway) {
2856
BUG_ON(thread);
2857
if (node->has_async_transaction)
2858
pending_async = true;
2859
else
2860
node->has_async_transaction = true;
2861
}
2862
2863
binder_inner_proc_lock(proc);
2864
if (proc->is_frozen) {
2865
frozen = true;
2866
proc->sync_recv |= !oneway;
2867
proc->async_recv |= oneway;
2868
}
2869
2870
if ((frozen && !oneway) || proc->is_dead ||
2871
(thread && thread->is_dead)) {
2872
binder_inner_proc_unlock(proc);
2873
binder_node_unlock(node);
2874
return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2875
}
2876
2877
if (!thread && !pending_async)
2878
thread = binder_select_thread_ilocked(proc);
2879
2880
if (thread) {
2881
binder_enqueue_thread_work_ilocked(thread, &t->work);
2882
} else if (!pending_async) {
2883
binder_enqueue_work_ilocked(&t->work, &proc->todo);
2884
} else {
2885
if ((t->flags & TF_UPDATE_TXN) && frozen) {
2886
t_outdated = binder_find_outdated_transaction_ilocked(t,
2887
&node->async_todo);
2888
if (t_outdated) {
2889
binder_debug(BINDER_DEBUG_TRANSACTION,
2890
"txn %d supersedes %d\n",
2891
t->debug_id, t_outdated->debug_id);
2892
list_del_init(&t_outdated->work.entry);
2893
proc->outstanding_txns--;
2894
}
2895
}
2896
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2897
}
2898
2899
if (!pending_async)
2900
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2901
2902
proc->outstanding_txns++;
2903
binder_inner_proc_unlock(proc);
2904
binder_node_unlock(node);
2905
2906
/*
2907
* To reduce potential contention, free the outdated transaction and
2908
* buffer after releasing the locks.
2909
*/
2910
if (t_outdated) {
2911
struct binder_buffer *buffer = t_outdated->buffer;
2912
2913
t_outdated->buffer = NULL;
2914
buffer->transaction = NULL;
2915
trace_binder_transaction_update_buffer_release(buffer);
2916
binder_release_entire_buffer(proc, NULL, buffer, false);
2917
binder_alloc_free_buf(&proc->alloc, buffer);
2918
kfree(t_outdated);
2919
binder_stats_deleted(BINDER_STAT_TRANSACTION);
2920
}
2921
2922
if (oneway && frozen)
2923
return BR_TRANSACTION_PENDING_FROZEN;
2924
2925
return 0;
2926
}
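/*
 * Illustrative summary of the queueing outcomes above:
 * sync txn, live target -> thread->todo or proc->todo, target woken
 * sync txn, frozen target -> rejected with BR_FROZEN_REPLY
 * oneway, no pending async -> thread->todo or proc->todo, target woken
 * oneway, pending async -> node->async_todo, no wakeup
 * oneway, frozen target -> queued, BR_TRANSACTION_PENDING_FROZEN
 * dead proc or thread -> BR_DEAD_REPLY (BR_FROZEN_REPLY if also frozen)
 */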
2927
2928
/**
2929
* binder_get_node_refs_for_txn() - Get required refs on node for txn
2930
* @node: struct binder_node for which to get refs
2931
* @procp: returns @node->proc if valid
2932
* @error: set to BR_DEAD_REPLY if no valid @procp can be returned
2933
*
2934
* User-space normally keeps the node alive when creating a transaction
2935
* since it has a reference to the target. The local strong ref keeps it
2936
* alive if the sending process dies before the target process processes
2937
* the transaction. If the source process is malicious or has a reference
2938
* counting bug, relying on the local strong ref can fail.
2939
*
2940
* Since user-space can cause the local strong ref to go away, we also take
2941
* a tmpref on the node to ensure it survives while we are constructing
2942
* the transaction. We also need a tmpref on the proc while we are
2943
* constructing the transaction, so we take that here as well.
2944
*
2945
* Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2946
* Also sets @procp if valid. If the @node->proc is NULL indicating that the
2947
* target proc has died, @error is set to BR_DEAD_REPLY.
2948
*/
2949
static struct binder_node *binder_get_node_refs_for_txn(
2950
struct binder_node *node,
2951
struct binder_proc **procp,
2952
uint32_t *error)
2953
{
2954
struct binder_node *target_node = NULL;
2955
2956
binder_node_inner_lock(node);
2957
if (node->proc) {
2958
target_node = node;
2959
binder_inc_node_nilocked(node, 1, 0, NULL);
2960
binder_inc_node_tmpref_ilocked(node);
2961
node->proc->tmp_ref++;
2962
*procp = node->proc;
2963
} else
2964
*error = BR_DEAD_REPLY;
2965
binder_node_inner_unlock(node);
2966
2967
return target_node;
2968
}
2969
2970
static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2971
uint32_t command, int32_t param)
2972
{
2973
struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2974
2975
if (!from) {
2976
/* annotation for sparse */
2977
__release(&from->proc->inner_lock);
2978
return;
2979
}
2980
2981
/* don't override existing errors */
2982
if (from->ee.command == BR_OK)
2983
binder_set_extended_error(&from->ee, id, command, param);
2984
binder_inner_proc_unlock(from->proc);
2985
binder_thread_dec_tmpref(from);
2986
}
2987
2988
/**
2989
* binder_netlink_report() - report a transaction failure via netlink
2990
* @proc: the binder proc sending the transaction
2991
* @t: the binder transaction that failed
2992
* @data_size: the user provided data size for the transaction
2993
* @error: enum binder_driver_return_protocol returned to sender
2994
*/
2995
static void binder_netlink_report(struct binder_proc *proc,
2996
struct binder_transaction *t,
2997
u32 data_size,
2998
u32 error)
2999
{
3000
const char *context = proc->context->name;
3001
struct sk_buff *skb;
3002
void *hdr;
3003
3004
if (!genl_has_listeners(&binder_nl_family, &init_net,
3005
BINDER_NLGRP_REPORT))
3006
return;
3007
3008
trace_binder_netlink_report(context, t, data_size, error);
3009
3010
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
3011
if (!skb)
3012
return;
3013
3014
hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
3015
if (!hdr)
3016
goto free_skb;
3017
3018
if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
3019
nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
3020
nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
3021
nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
3022
goto cancel_skb;
3023
3024
if (t->to_proc &&
3025
nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
3026
goto cancel_skb;
3027
3028
if (t->to_thread &&
3029
nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
3030
goto cancel_skb;
3031
3032
if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
3033
goto cancel_skb;
3034
3035
if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
3036
nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
3037
nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
3038
goto cancel_skb;
3039
3040
genlmsg_end(skb, hdr);
3041
genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
3042
GFP_KERNEL);
3043
return;
3044
3045
cancel_skb:
3046
genlmsg_cancel(skb, hdr);
3047
free_skb:
3048
nlmsg_free(skb);
3049
}
3050
3051
static void binder_transaction(struct binder_proc *proc,
3052
struct binder_thread *thread,
3053
struct binder_transaction_data *tr, int reply,
3054
binder_size_t extra_buffers_size)
3055
{
3056
int ret;
3057
struct binder_transaction *t;
3058
struct binder_work *w;
3059
struct binder_work *tcomplete;
3060
binder_size_t buffer_offset = 0;
3061
binder_size_t off_start_offset, off_end_offset;
3062
binder_size_t off_min;
3063
binder_size_t sg_buf_offset, sg_buf_end_offset;
3064
binder_size_t user_offset = 0;
3065
struct binder_proc *target_proc = NULL;
3066
struct binder_thread *target_thread = NULL;
3067
struct binder_node *target_node = NULL;
3068
struct binder_transaction *in_reply_to = NULL;
3069
struct binder_transaction_log_entry *e;
3070
uint32_t return_error = 0;
3071
uint32_t return_error_param = 0;
3072
uint32_t return_error_line = 0;
3073
binder_size_t last_fixup_obj_off = 0;
3074
binder_size_t last_fixup_min_off = 0;
3075
struct binder_context *context = proc->context;
3076
int t_debug_id = atomic_inc_return(&binder_last_id);
3077
ktime_t t_start_time = ktime_get();
3078
struct lsm_context lsmctx = { };
3079
struct list_head sgc_head;
3080
struct list_head pf_head;
3081
const void __user *user_buffer = (const void __user *)
3082
(uintptr_t)tr->data.ptr.buffer;
3083
INIT_LIST_HEAD(&sgc_head);
3084
INIT_LIST_HEAD(&pf_head);
3085
3086
e = binder_transaction_log_add(&binder_transaction_log);
3087
e->debug_id = t_debug_id;
3088
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3089
e->from_proc = proc->pid;
3090
e->from_thread = thread->pid;
3091
e->target_handle = tr->target.handle;
3092
e->data_size = tr->data_size;
3093
e->offsets_size = tr->offsets_size;
3094
strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3095
3096
binder_inner_proc_lock(proc);
3097
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3098
binder_inner_proc_unlock(proc);
3099
3100
t = kzalloc(sizeof(*t), GFP_KERNEL);
3101
if (!t) {
3102
binder_txn_error("%d:%d cannot allocate transaction\n",
3103
thread->pid, proc->pid);
3104
return_error = BR_FAILED_REPLY;
3105
return_error_param = -ENOMEM;
3106
return_error_line = __LINE__;
3107
goto err_alloc_t_failed;
3108
}
3109
INIT_LIST_HEAD(&t->fd_fixups);
3110
binder_stats_created(BINDER_STAT_TRANSACTION);
3111
spin_lock_init(&t->lock);
3112
t->debug_id = t_debug_id;
3113
t->start_time = t_start_time;
3114
t->from_pid = proc->pid;
3115
t->from_tid = thread->pid;
3116
t->sender_euid = task_euid(proc->tsk);
3117
t->code = tr->code;
3118
t->flags = tr->flags;
3119
t->priority = task_nice(current);
3120
t->work.type = BINDER_WORK_TRANSACTION;
3121
t->is_async = !reply && (tr->flags & TF_ONE_WAY);
3122
t->is_reply = reply;
3123
if (!reply && !(tr->flags & TF_ONE_WAY))
3124
t->from = thread;
3125
3126
if (reply) {
3127
binder_inner_proc_lock(proc);
3128
in_reply_to = thread->transaction_stack;
3129
if (in_reply_to == NULL) {
3130
binder_inner_proc_unlock(proc);
3131
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3132
proc->pid, thread->pid);
3133
return_error = BR_FAILED_REPLY;
3134
return_error_param = -EPROTO;
3135
return_error_line = __LINE__;
3136
goto err_empty_call_stack;
3137
}
3138
if (in_reply_to->to_thread != thread) {
3139
spin_lock(&in_reply_to->lock);
3140
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3141
proc->pid, thread->pid, in_reply_to->debug_id,
3142
in_reply_to->to_proc ?
3143
in_reply_to->to_proc->pid : 0,
3144
in_reply_to->to_thread ?
3145
in_reply_to->to_thread->pid : 0);
3146
spin_unlock(&in_reply_to->lock);
3147
binder_inner_proc_unlock(proc);
3148
return_error = BR_FAILED_REPLY;
3149
return_error_param = -EPROTO;
3150
return_error_line = __LINE__;
3151
in_reply_to = NULL;
3152
goto err_bad_call_stack;
3153
}
3154
thread->transaction_stack = in_reply_to->to_parent;
3155
binder_inner_proc_unlock(proc);
3156
binder_set_nice(in_reply_to->saved_priority);
3157
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3158
if (target_thread == NULL) {
3159
/* annotation for sparse */
3160
__release(&target_thread->proc->inner_lock);
3161
binder_txn_error("%d:%d reply target not found\n",
3162
thread->pid, proc->pid);
3163
return_error = BR_DEAD_REPLY;
3164
return_error_line = __LINE__;
3165
goto err_dead_binder;
3166
}
3167
if (target_thread->transaction_stack != in_reply_to) {
3168
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3169
proc->pid, thread->pid,
3170
target_thread->transaction_stack ?
3171
target_thread->transaction_stack->debug_id : 0,
3172
in_reply_to->debug_id);
3173
binder_inner_proc_unlock(target_thread->proc);
3174
return_error = BR_FAILED_REPLY;
3175
return_error_param = -EPROTO;
3176
return_error_line = __LINE__;
3177
in_reply_to = NULL;
3178
target_thread = NULL;
3179
goto err_dead_binder;
3180
}
3181
target_proc = target_thread->proc;
3182
target_proc->tmp_ref++;
3183
binder_inner_proc_unlock(target_thread->proc);
3184
} else {
3185
if (tr->target.handle) {
3186
struct binder_ref *ref;
3187
3188
/*
3189
* There must already be a strong ref
3190
* on this node. If so, do a strong
3191
* increment on the node to ensure it
3192
* stays alive until the transaction is
3193
* done.
3194
*/
3195
binder_proc_lock(proc);
3196
ref = binder_get_ref_olocked(proc, tr->target.handle,
3197
true);
3198
if (ref) {
3199
target_node = binder_get_node_refs_for_txn(
3200
ref->node, &target_proc,
3201
&return_error);
3202
} else {
3203
binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3204
proc->pid, thread->pid, tr->target.handle);
3205
return_error = BR_FAILED_REPLY;
3206
}
3207
binder_proc_unlock(proc);
3208
} else {
3209
mutex_lock(&context->context_mgr_node_lock);
3210
target_node = context->binder_context_mgr_node;
3211
if (target_node)
3212
target_node = binder_get_node_refs_for_txn(
3213
target_node, &target_proc,
3214
&return_error);
3215
else
3216
return_error = BR_DEAD_REPLY;
3217
mutex_unlock(&context->context_mgr_node_lock);
3218
if (target_node && target_proc->pid == proc->pid) {
3219
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3220
proc->pid, thread->pid);
3221
return_error = BR_FAILED_REPLY;
3222
return_error_param = -EINVAL;
3223
return_error_line = __LINE__;
3224
goto err_invalid_target_handle;
3225
}
3226
}
3227
if (!target_node) {
3228
binder_txn_error("%d:%d cannot find target node\n",
3229
proc->pid, thread->pid);
3230
/* return_error is set above */
3231
return_error_param = -EINVAL;
3232
return_error_line = __LINE__;
3233
goto err_dead_binder;
3234
}
3235
e->to_node = target_node->debug_id;
3236
if (WARN_ON(proc == target_proc)) {
3237
binder_txn_error("%d:%d self transactions not allowed\n",
3238
thread->pid, proc->pid);
3239
return_error = BR_FAILED_REPLY;
3240
return_error_param = -EINVAL;
3241
return_error_line = __LINE__;
3242
goto err_invalid_target_handle;
3243
}
3244
if (security_binder_transaction(proc->cred,
3245
target_proc->cred) < 0) {
3246
binder_txn_error("%d:%d transaction credentials failed\n",
3247
thread->pid, proc->pid);
3248
return_error = BR_FAILED_REPLY;
3249
return_error_param = -EPERM;
3250
return_error_line = __LINE__;
3251
goto err_invalid_target_handle;
3252
}
3253
binder_inner_proc_lock(proc);
3254
3255
w = list_first_entry_or_null(&thread->todo,
3256
struct binder_work, entry);
3257
if (!(tr->flags & TF_ONE_WAY) && w &&
3258
w->type == BINDER_WORK_TRANSACTION) {
3259
/*
3260
* Do not allow new outgoing transaction from a
3261
* thread that has a transaction at the head of
3262
* its todo list. Only need to check the head
3263
* because binder_select_thread_ilocked picks a
3264
* thread from proc->waiting_threads to enqueue
3265
* the transaction, and nothing is queued to the
3266
* todo list while the thread is on waiting_threads.
3267
*/
3268
binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3269
proc->pid, thread->pid);
3270
binder_inner_proc_unlock(proc);
3271
return_error = BR_FAILED_REPLY;
3272
return_error_param = -EPROTO;
3273
return_error_line = __LINE__;
3274
goto err_bad_todo_list;
3275
}
3276
3277
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3278
struct binder_transaction *tmp;
3279
3280
tmp = thread->transaction_stack;
3281
if (tmp->to_thread != thread) {
3282
spin_lock(&tmp->lock);
3283
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3284
proc->pid, thread->pid, tmp->debug_id,
3285
tmp->to_proc ? tmp->to_proc->pid : 0,
3286
tmp->to_thread ?
3287
tmp->to_thread->pid : 0);
3288
spin_unlock(&tmp->lock);
3289
binder_inner_proc_unlock(proc);
3290
return_error = BR_FAILED_REPLY;
3291
return_error_param = -EPROTO;
3292
return_error_line = __LINE__;
3293
goto err_bad_call_stack;
3294
}
3295
while (tmp) {
3296
struct binder_thread *from;
3297
3298
spin_lock(&tmp->lock);
3299
from = tmp->from;
3300
if (from && from->proc == target_proc) {
3301
atomic_inc(&from->tmp_ref);
3302
target_thread = from;
3303
spin_unlock(&tmp->lock);
3304
break;
3305
}
3306
spin_unlock(&tmp->lock);
3307
tmp = tmp->from_parent;
3308
}
3309
}
3310
binder_inner_proc_unlock(proc);
3311
}
3312
3313
t->to_proc = target_proc;
3314
t->to_thread = target_thread;
3315
if (target_thread)
3316
e->to_thread = target_thread->pid;
3317
e->to_proc = target_proc->pid;
3318
3319
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3320
if (tcomplete == NULL) {
3321
binder_txn_error("%d:%d cannot allocate work for transaction\n",
3322
thread->pid, proc->pid);
3323
return_error = BR_FAILED_REPLY;
3324
return_error_param = -ENOMEM;
3325
return_error_line = __LINE__;
3326
goto err_alloc_tcomplete_failed;
3327
}
3328
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3329
3330
if (reply)
3331
binder_debug(BINDER_DEBUG_TRANSACTION,
3332
"%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3333
proc->pid, thread->pid, t->debug_id,
3334
target_proc->pid, target_thread->pid,
3335
(u64)tr->data_size, (u64)tr->offsets_size,
3336
(u64)extra_buffers_size);
3337
else
3338
binder_debug(BINDER_DEBUG_TRANSACTION,
3339
"%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3340
proc->pid, thread->pid, t->debug_id,
3341
target_proc->pid, target_node->debug_id,
3342
(u64)tr->data_size, (u64)tr->offsets_size,
3343
(u64)extra_buffers_size);
3344
3345
if (target_node && target_node->txn_security_ctx) {
3346
u32 secid;
3347
size_t added_size;
3348
3349
security_cred_getsecid(proc->cred, &secid);
3350
ret = security_secid_to_secctx(secid, &lsmctx);
3351
if (ret < 0) {
3352
binder_txn_error("%d:%d failed to get security context\n",
3353
thread->pid, proc->pid);
3354
return_error = BR_FAILED_REPLY;
3355
return_error_param = ret;
3356
return_error_line = __LINE__;
3357
goto err_get_secctx_failed;
3358
}
3359
added_size = ALIGN(lsmctx.len, sizeof(u64));
3360
extra_buffers_size += added_size;
3361
if (extra_buffers_size < added_size) {
3362
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3363
thread->pid, proc->pid);
3364
return_error = BR_FAILED_REPLY;
3365
return_error_param = -EINVAL;
3366
return_error_line = __LINE__;
3367
goto err_bad_extra_size;
3368
}
3369
}
3370
3371
trace_binder_transaction(reply, t, target_node);
3372
3373
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3374
tr->offsets_size, extra_buffers_size,
3375
!reply && (t->flags & TF_ONE_WAY));
3376
if (IS_ERR(t->buffer)) {
3377
char *s;
3378
3379
ret = PTR_ERR(t->buffer);
3380
s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3381
: (ret == -ENOSPC) ? ": no space left"
3382
: (ret == -ENOMEM) ? ": memory allocation failed"
3383
: "";
3384
binder_txn_error("cannot allocate buffer%s", s);
3385
3386
return_error_param = PTR_ERR(t->buffer);
3387
return_error = return_error_param == -ESRCH ?
3388
BR_DEAD_REPLY : BR_FAILED_REPLY;
3389
return_error_line = __LINE__;
3390
t->buffer = NULL;
3391
goto err_binder_alloc_buf_failed;
3392
}
3393
if (lsmctx.context) {
3394
int err;
3395
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3396
ALIGN(tr->offsets_size, sizeof(void *)) +
3397
ALIGN(extra_buffers_size, sizeof(void *)) -
3398
ALIGN(lsmctx.len, sizeof(u64));
3399
3400
t->security_ctx = t->buffer->user_data + buf_offset;
3401
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3402
t->buffer, buf_offset,
3403
lsmctx.context, lsmctx.len);
3404
if (err) {
3405
t->security_ctx = 0;
3406
WARN_ON(1);
3407
}
3408
security_release_secctx(&lsmctx);
3409
lsmctx.context = NULL;
3410
}
3411
t->buffer->debug_id = t->debug_id;
3412
t->buffer->transaction = t;
3413
t->buffer->target_node = target_node;
3414
t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3415
trace_binder_transaction_alloc_buf(t->buffer);
3416
3417
if (binder_alloc_copy_user_to_buffer(
3418
&target_proc->alloc,
3419
t->buffer,
3420
ALIGN(tr->data_size, sizeof(void *)),
3421
(const void __user *)
3422
(uintptr_t)tr->data.ptr.offsets,
3423
tr->offsets_size)) {
3424
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3425
proc->pid, thread->pid);
3426
return_error = BR_FAILED_REPLY;
3427
return_error_param = -EFAULT;
3428
return_error_line = __LINE__;
3429
goto err_copy_data_failed;
3430
}
3431
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3432
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3433
proc->pid, thread->pid, (u64)tr->offsets_size);
3434
return_error = BR_FAILED_REPLY;
3435
return_error_param = -EINVAL;
3436
return_error_line = __LINE__;
3437
goto err_bad_offset;
3438
}
3439
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3440
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3441
proc->pid, thread->pid,
3442
(u64)extra_buffers_size);
3443
return_error = BR_FAILED_REPLY;
3444
return_error_param = -EINVAL;
3445
return_error_line = __LINE__;
3446
goto err_bad_offset;
3447
}
3448
off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3449
buffer_offset = off_start_offset;
3450
off_end_offset = off_start_offset + tr->offsets_size;
3451
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3452
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3453
ALIGN(lsmctx.len, sizeof(u64));
3454
off_min = 0;
3455
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3456
buffer_offset += sizeof(binder_size_t)) {
3457
struct binder_object_header *hdr;
3458
size_t object_size;
3459
struct binder_object object;
3460
binder_size_t object_offset;
3461
binder_size_t copy_size;
3462
3463
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3464
&object_offset,
3465
t->buffer,
3466
buffer_offset,
3467
sizeof(object_offset))) {
3468
binder_txn_error("%d:%d copy offset from buffer failed\n",
3469
thread->pid, proc->pid);
3470
return_error = BR_FAILED_REPLY;
3471
return_error_param = -EINVAL;
3472
return_error_line = __LINE__;
3473
goto err_bad_offset;
3474
}
3475
3476
/*
3477
* Copy the source user buffer up to the next object
3478
* that will be processed.
3479
*/
3480
copy_size = object_offset - user_offset;
3481
if (copy_size && (user_offset > object_offset ||
3482
object_offset > tr->data_size ||
3483
binder_alloc_copy_user_to_buffer(
3484
&target_proc->alloc,
3485
t->buffer, user_offset,
3486
user_buffer + user_offset,
3487
copy_size))) {
3488
binder_user_error("%d:%d got transaction with invalid data ptr\n",
3489
proc->pid, thread->pid);
3490
return_error = BR_FAILED_REPLY;
3491
return_error_param = -EFAULT;
3492
return_error_line = __LINE__;
3493
goto err_copy_data_failed;
3494
}
3495
object_size = binder_get_object(target_proc, user_buffer,
3496
t->buffer, object_offset, &object);
3497
if (object_size == 0 || object_offset < off_min) {
3498
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3499
proc->pid, thread->pid,
3500
(u64)object_offset,
3501
(u64)off_min,
3502
(u64)t->buffer->data_size);
3503
return_error = BR_FAILED_REPLY;
3504
return_error_param = -EINVAL;
3505
return_error_line = __LINE__;
3506
goto err_bad_offset;
3507
}
3508
/*
3509
* Set offset to the next buffer fragment to be
3510
* copied
3511
*/
3512
user_offset = object_offset + object_size;
3513
3514
hdr = &object.hdr;
3515
off_min = object_offset + object_size;
3516
switch (hdr->type) {
3517
case BINDER_TYPE_BINDER:
3518
case BINDER_TYPE_WEAK_BINDER: {
3519
struct flat_binder_object *fp;
3520
3521
fp = to_flat_binder_object(hdr);
3522
ret = binder_translate_binder(fp, t, thread);
3523
3524
if (ret < 0 ||
3525
binder_alloc_copy_to_buffer(&target_proc->alloc,
3526
t->buffer,
3527
object_offset,
3528
fp, sizeof(*fp))) {
3529
binder_txn_error("%d:%d translate binder failed\n",
3530
thread->pid, proc->pid);
3531
return_error = BR_FAILED_REPLY;
3532
return_error_param = ret;
3533
return_error_line = __LINE__;
3534
goto err_translate_failed;
3535
}
3536
} break;
3537
case BINDER_TYPE_HANDLE:
3538
case BINDER_TYPE_WEAK_HANDLE: {
3539
struct flat_binder_object *fp;
3540
3541
fp = to_flat_binder_object(hdr);
3542
ret = binder_translate_handle(fp, t, thread);
3543
if (ret < 0 ||
3544
binder_alloc_copy_to_buffer(&target_proc->alloc,
3545
t->buffer,
3546
object_offset,
3547
fp, sizeof(*fp))) {
3548
binder_txn_error("%d:%d translate handle failed\n",
3549
thread->pid, proc->pid);
3550
return_error = BR_FAILED_REPLY;
3551
return_error_param = ret;
3552
return_error_line = __LINE__;
3553
goto err_translate_failed;
3554
}
3555
} break;
3556
3557
case BINDER_TYPE_FD: {
3558
struct binder_fd_object *fp = to_binder_fd_object(hdr);
3559
binder_size_t fd_offset = object_offset +
3560
(uintptr_t)&fp->fd - (uintptr_t)fp;
3561
int ret = binder_translate_fd(fp->fd, fd_offset, t,
3562
thread, in_reply_to);
3563
3564
fp->pad_binder = 0;
3565
if (ret < 0 ||
3566
binder_alloc_copy_to_buffer(&target_proc->alloc,
3567
t->buffer,
3568
object_offset,
3569
fp, sizeof(*fp))) {
3570
binder_txn_error("%d:%d translate fd failed\n",
3571
thread->pid, proc->pid);
3572
return_error = BR_FAILED_REPLY;
3573
return_error_param = ret;
3574
return_error_line = __LINE__;
3575
goto err_translate_failed;
3576
}
3577
} break;
3578
case BINDER_TYPE_FDA: {
3579
struct binder_object ptr_object;
3580
binder_size_t parent_offset;
3581
struct binder_object user_object;
3582
size_t user_parent_size;
3583
struct binder_fd_array_object *fda =
3584
to_binder_fd_array_object(hdr);
3585
size_t num_valid = (buffer_offset - off_start_offset) /
3586
sizeof(binder_size_t);
3587
struct binder_buffer_object *parent =
3588
binder_validate_ptr(target_proc, t->buffer,
3589
&ptr_object, fda->parent,
3590
off_start_offset,
3591
&parent_offset,
3592
num_valid);
3593
if (!parent) {
3594
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3595
proc->pid, thread->pid);
3596
return_error = BR_FAILED_REPLY;
3597
return_error_param = -EINVAL;
3598
return_error_line = __LINE__;
3599
goto err_bad_parent;
3600
}
3601
if (!binder_validate_fixup(target_proc, t->buffer,
3602
off_start_offset,
3603
parent_offset,
3604
fda->parent_offset,
3605
last_fixup_obj_off,
3606
last_fixup_min_off)) {
3607
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3608
proc->pid, thread->pid);
3609
return_error = BR_FAILED_REPLY;
3610
return_error_param = -EINVAL;
3611
return_error_line = __LINE__;
3612
goto err_bad_parent;
3613
}
3614
/*
3615
* We need to read the user version of the parent
3616
* object to get the original user offset
3617
*/
3618
user_parent_size =
3619
binder_get_object(proc, user_buffer, t->buffer,
3620
parent_offset, &user_object);
3621
if (user_parent_size != sizeof(user_object.bbo)) {
3622
binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3623
proc->pid, thread->pid,
3624
user_parent_size,
3625
sizeof(user_object.bbo));
3626
return_error = BR_FAILED_REPLY;
3627
return_error_param = -EINVAL;
3628
return_error_line = __LINE__;
3629
goto err_bad_parent;
3630
}
3631
ret = binder_translate_fd_array(&pf_head, fda,
3632
user_buffer, parent,
3633
&user_object.bbo, t,
3634
thread, in_reply_to);
3635
if (!ret)
3636
ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3637
t->buffer,
3638
object_offset,
3639
fda, sizeof(*fda));
3640
if (ret) {
3641
binder_txn_error("%d:%d translate fd array failed\n",
3642
thread->pid, proc->pid);
3643
return_error = BR_FAILED_REPLY;
3644
return_error_param = ret > 0 ? -EINVAL : ret;
3645
return_error_line = __LINE__;
3646
goto err_translate_failed;
3647
}
3648
last_fixup_obj_off = parent_offset;
3649
last_fixup_min_off =
3650
fda->parent_offset + sizeof(u32) * fda->num_fds;
3651
} break;
3652
case BINDER_TYPE_PTR: {
3653
struct binder_buffer_object *bp =
3654
to_binder_buffer_object(hdr);
3655
size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3656
size_t num_valid;
3657
3658
if (bp->length > buf_left) {
3659
binder_user_error("%d:%d got transaction with too large buffer\n",
3660
proc->pid, thread->pid);
3661
return_error = BR_FAILED_REPLY;
3662
return_error_param = -EINVAL;
3663
return_error_line = __LINE__;
3664
goto err_bad_offset;
3665
}
3666
ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3667
(const void __user *)(uintptr_t)bp->buffer,
3668
bp->length);
3669
if (ret) {
3670
binder_txn_error("%d:%d deferred copy failed\n",
3671
thread->pid, proc->pid);
3672
return_error = BR_FAILED_REPLY;
3673
return_error_param = ret;
3674
return_error_line = __LINE__;
3675
goto err_translate_failed;
3676
}
3677
/* Fixup buffer pointer to target proc address space */
3678
bp->buffer = t->buffer->user_data + sg_buf_offset;
3679
sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3680
3681
num_valid = (buffer_offset - off_start_offset) /
3682
sizeof(binder_size_t);
3683
ret = binder_fixup_parent(&pf_head, t,
3684
thread, bp,
3685
off_start_offset,
3686
num_valid,
3687
last_fixup_obj_off,
3688
last_fixup_min_off);
3689
if (ret < 0 ||
3690
binder_alloc_copy_to_buffer(&target_proc->alloc,
3691
t->buffer,
3692
object_offset,
3693
bp, sizeof(*bp))) {
3694
binder_txn_error("%d:%d failed to fixup parent\n",
3695
thread->pid, proc->pid);
3696
return_error = BR_FAILED_REPLY;
3697
return_error_param = ret;
3698
return_error_line = __LINE__;
3699
goto err_translate_failed;
3700
}
3701
last_fixup_obj_off = object_offset;
3702
last_fixup_min_off = 0;
3703
} break;
3704
default:
3705
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3706
proc->pid, thread->pid, hdr->type);
3707
return_error = BR_FAILED_REPLY;
3708
return_error_param = -EINVAL;
3709
return_error_line = __LINE__;
3710
goto err_bad_object_type;
3711
}
3712
}
3713
/* Done processing objects, copy the rest of the buffer */
3714
if (binder_alloc_copy_user_to_buffer(
3715
&target_proc->alloc,
3716
t->buffer, user_offset,
3717
user_buffer + user_offset,
3718
tr->data_size - user_offset)) {
3719
binder_user_error("%d:%d got transaction with invalid data ptr\n",
3720
proc->pid, thread->pid);
3721
return_error = BR_FAILED_REPLY;
3722
return_error_param = -EFAULT;
3723
return_error_line = __LINE__;
3724
goto err_copy_data_failed;
3725
}
3726
3727
ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3728
&sgc_head, &pf_head);
3729
if (ret) {
3730
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3731
proc->pid, thread->pid);
3732
return_error = BR_FAILED_REPLY;
3733
return_error_param = ret;
3734
return_error_line = __LINE__;
3735
goto err_copy_data_failed;
3736
}
3737
if (t->buffer->oneway_spam_suspect) {
3738
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3739
binder_netlink_report(proc, t, tr->data_size,
3740
BR_ONEWAY_SPAM_SUSPECT);
3741
} else {
3742
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3743
}
3744
3745
if (reply) {
3746
binder_enqueue_thread_work(thread, tcomplete);
3747
binder_inner_proc_lock(target_proc);
3748
if (target_thread->is_dead) {
3749
return_error = BR_DEAD_REPLY;
3750
binder_inner_proc_unlock(target_proc);
3751
goto err_dead_proc_or_thread;
3752
}
3753
BUG_ON(t->buffer->async_transaction != 0);
3754
binder_pop_transaction_ilocked(target_thread, in_reply_to);
3755
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3756
target_proc->outstanding_txns++;
3757
binder_inner_proc_unlock(target_proc);
3758
wake_up_interruptible_sync(&target_thread->wait);
3759
binder_free_transaction(in_reply_to);
3760
} else if (!(t->flags & TF_ONE_WAY)) {
3761
BUG_ON(t->buffer->async_transaction != 0);
3762
binder_inner_proc_lock(proc);
3763
/*
3764
* Defer the TRANSACTION_COMPLETE, so we don't return to
3765
* userspace immediately; this allows the target process to
3766
* immediately start processing this transaction, reducing
3767
* latency. We will then return the TRANSACTION_COMPLETE when
3768
* the target replies (or there is an error).
3769
*/
3770
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3771
t->from_parent = thread->transaction_stack;
3772
thread->transaction_stack = t;
3773
binder_inner_proc_unlock(proc);
3774
return_error = binder_proc_transaction(t,
3775
target_proc, target_thread);
3776
if (return_error) {
3777
binder_inner_proc_lock(proc);
3778
binder_pop_transaction_ilocked(thread, t);
3779
binder_inner_proc_unlock(proc);
3780
goto err_dead_proc_or_thread;
3781
}
3782
} else {
3783
BUG_ON(target_node == NULL);
3784
BUG_ON(t->buffer->async_transaction != 1);
3785
return_error = binder_proc_transaction(t, target_proc, NULL);
3786
/*
3787
* Let the caller know when async transaction reaches a frozen
3788
* process and is put in a pending queue, waiting for the target
3789
* process to be unfrozen.
3790
*/
3791
if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
3792
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3793
binder_netlink_report(proc, t, tr->data_size,
3794
return_error);
3795
}
3796
binder_enqueue_thread_work(thread, tcomplete);
3797
if (return_error &&
3798
return_error != BR_TRANSACTION_PENDING_FROZEN)
3799
goto err_dead_proc_or_thread;
3800
}
3801
if (target_thread)
3802
binder_thread_dec_tmpref(target_thread);
3803
binder_proc_dec_tmpref(target_proc);
3804
if (target_node)
3805
binder_dec_node_tmpref(target_node);
3806
/*
3807
* write barrier to synchronize with initialization
3808
* of log entry
3809
*/
3810
smp_wmb();
3811
WRITE_ONCE(e->debug_id_done, t_debug_id);
3812
return;
3813
3814
err_dead_proc_or_thread:
3815
binder_txn_error("%d:%d dead process or thread\n",
3816
thread->pid, proc->pid);
3817
return_error_line = __LINE__;
3818
binder_dequeue_work(proc, tcomplete);
3819
err_translate_failed:
3820
err_bad_object_type:
3821
err_bad_offset:
3822
err_bad_parent:
3823
err_copy_data_failed:
3824
binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3825
binder_free_txn_fixups(t);
3826
trace_binder_transaction_failed_buffer_release(t->buffer);
3827
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3828
buffer_offset, true);
3829
if (target_node)
3830
binder_dec_node_tmpref(target_node);
3831
target_node = NULL;
3832
t->buffer->transaction = NULL;
3833
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3834
err_binder_alloc_buf_failed:
3835
err_bad_extra_size:
3836
if (lsmctx.context)
3837
security_release_secctx(&lsmctx);
3838
err_get_secctx_failed:
3839
kfree(tcomplete);
3840
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3841
err_alloc_tcomplete_failed:
3842
if (trace_binder_txn_latency_free_enabled())
3843
binder_txn_latency_free(t);
3844
err_bad_todo_list:
3845
err_bad_call_stack:
3846
err_empty_call_stack:
3847
err_dead_binder:
3848
err_invalid_target_handle:
3849
if (target_node) {
3850
binder_dec_node(target_node, 1, 0);
3851
binder_dec_node_tmpref(target_node);
3852
}
3853
3854
binder_netlink_report(proc, t, tr->data_size, return_error);
3855
kfree(t);
3856
binder_stats_deleted(BINDER_STAT_TRANSACTION);
3857
err_alloc_t_failed:
3858
3859
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3860
"%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3861
proc->pid, thread->pid, reply ? "reply" :
3862
(tr->flags & TF_ONE_WAY ? "async" : "call"),
3863
target_proc ? target_proc->pid : 0,
3864
target_thread ? target_thread->pid : 0,
3865
t_debug_id, return_error, return_error_param,
3866
tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3867
return_error_line);
3868
3869
if (target_thread)
3870
binder_thread_dec_tmpref(target_thread);
3871
if (target_proc)
3872
binder_proc_dec_tmpref(target_proc);
3873
3874
{
3875
struct binder_transaction_log_entry *fe;
3876
3877
e->return_error = return_error;
3878
e->return_error_param = return_error_param;
3879
e->return_error_line = return_error_line;
3880
fe = binder_transaction_log_add(&binder_transaction_log_failed);
3881
*fe = *e;
3882
/*
3883
* write barrier to synchronize with initialization
3884
* of log entry
3885
*/
3886
smp_wmb();
3887
WRITE_ONCE(e->debug_id_done, t_debug_id);
3888
WRITE_ONCE(fe->debug_id_done, t_debug_id);
3889
}
3890
3891
BUG_ON(thread->return_error.cmd != BR_OK);
3892
if (in_reply_to) {
3893
binder_set_txn_from_error(in_reply_to, t_debug_id,
3894
return_error, return_error_param);
3895
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3896
binder_enqueue_thread_work(thread, &thread->return_error.work);
3897
binder_send_failed_reply(in_reply_to, return_error);
3898
} else {
3899
binder_inner_proc_lock(proc);
3900
binder_set_extended_error(&thread->ee, t_debug_id,
3901
return_error, return_error_param);
3902
binder_inner_proc_unlock(proc);
3903
thread->return_error.cmd = return_error;
3904
binder_enqueue_thread_work(thread, &thread->return_error.work);
3905
}
3906
}
3907
3908
static int
3909
binder_request_freeze_notification(struct binder_proc *proc,
3910
struct binder_thread *thread,
3911
struct binder_handle_cookie *handle_cookie)
3912
{
3913
struct binder_ref_freeze *freeze;
3914
struct binder_ref *ref;
3915
3916
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3917
if (!freeze)
3918
return -ENOMEM;
3919
binder_proc_lock(proc);
3920
ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3921
if (!ref) {
3922
binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3923
proc->pid, thread->pid, handle_cookie->handle);
3924
binder_proc_unlock(proc);
3925
kfree(freeze);
3926
return -EINVAL;
3927
}
3928
3929
binder_node_lock(ref->node);
3930
if (ref->freeze) {
3931
binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3932
proc->pid, thread->pid);
3933
binder_node_unlock(ref->node);
3934
binder_proc_unlock(proc);
3935
kfree(freeze);
3936
return -EINVAL;
3937
}
3938
3939
binder_stats_created(BINDER_STAT_FREEZE);
3940
INIT_LIST_HEAD(&freeze->work.entry);
3941
freeze->cookie = handle_cookie->cookie;
3942
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3943
ref->freeze = freeze;
3944
3945
if (ref->node->proc) {
3946
binder_inner_proc_lock(ref->node->proc);
3947
freeze->is_frozen = ref->node->proc->is_frozen;
3948
binder_inner_proc_unlock(ref->node->proc);
3949
3950
binder_inner_proc_lock(proc);
3951
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3952
binder_wakeup_proc_ilocked(proc);
3953
binder_inner_proc_unlock(proc);
3954
}
3955
3956
binder_node_unlock(ref->node);
3957
binder_proc_unlock(proc);
3958
return 0;
3959
}
3960
3961
static int
3962
binder_clear_freeze_notification(struct binder_proc *proc,
3963
struct binder_thread *thread,
3964
struct binder_handle_cookie *handle_cookie)
3965
{
3966
struct binder_ref_freeze *freeze;
3967
struct binder_ref *ref;
3968
3969
binder_proc_lock(proc);
3970
ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3971
if (!ref) {
3972
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3973
proc->pid, thread->pid, handle_cookie->handle);
3974
binder_proc_unlock(proc);
3975
return -EINVAL;
3976
}
3977
3978
binder_node_lock(ref->node);
3979
3980
if (!ref->freeze) {
3981
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3982
proc->pid, thread->pid);
3983
binder_node_unlock(ref->node);
3984
binder_proc_unlock(proc);
3985
return -EINVAL;
3986
}
3987
freeze = ref->freeze;
3988
binder_inner_proc_lock(proc);
3989
if (freeze->cookie != handle_cookie->cookie) {
3990
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3991
proc->pid, thread->pid, (u64)freeze->cookie,
3992
(u64)handle_cookie->cookie);
3993
binder_inner_proc_unlock(proc);
3994
binder_node_unlock(ref->node);
3995
binder_proc_unlock(proc);
3996
return -EINVAL;
3997
}
3998
ref->freeze = NULL;
3999
/*
4000
* Take the existing freeze object and overwrite its work type. There are three cases here:
4001
* 1. No pending notification. In this case just add the work to the queue.
4002
* 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
4003
* should resend with the new work type.
4004
* 3. A notification is pending to be sent. Since the work is already in the queue, nothing
4005
* needs to be done here.
4006
*/
4007
freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
4008
if (list_empty(&freeze->work.entry)) {
4009
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4010
binder_wakeup_proc_ilocked(proc);
4011
} else if (freeze->sent) {
4012
freeze->resend = true;
4013
}
4014
binder_inner_proc_unlock(proc);
4015
binder_node_unlock(ref->node);
4016
binder_proc_unlock(proc);
4017
return 0;
4018
}
4019
4020
static int
4021
binder_freeze_notification_done(struct binder_proc *proc,
4022
struct binder_thread *thread,
4023
binder_uintptr_t cookie)
4024
{
4025
struct binder_ref_freeze *freeze = NULL;
4026
struct binder_work *w;
4027
4028
binder_inner_proc_lock(proc);
4029
list_for_each_entry(w, &proc->delivered_freeze, entry) {
4030
struct binder_ref_freeze *tmp_freeze =
4031
container_of(w, struct binder_ref_freeze, work);
4032
4033
if (tmp_freeze->cookie == cookie) {
4034
freeze = tmp_freeze;
4035
break;
4036
}
4037
}
4038
if (!freeze) {
4039
binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
4040
proc->pid, thread->pid, (u64)cookie);
4041
binder_inner_proc_unlock(proc);
4042
return -EINVAL;
4043
}
4044
binder_dequeue_work_ilocked(&freeze->work);
4045
freeze->sent = false;
4046
if (freeze->resend) {
4047
freeze->resend = false;
4048
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4049
binder_wakeup_proc_ilocked(proc);
4050
}
4051
binder_inner_proc_unlock(proc);
4052
return 0;
4053
}
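/*
 * Illustrative sketch only (kept in a comment, not driver code): the
 * userspace side of the freeze-notification handshake served by the three
 * helpers above.  "fd", "handle" and "cookie" are assumed to be an open
 * binder descriptor, a valid handle and a caller-chosen cookie; exact
 * struct layouts come from the binder UAPI header.
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_handle_cookie hc;
 *	} __attribute__((packed)) req = {
 *		.cmd = BC_REQUEST_FREEZE_NOTIFICATION,
 *		.hc = { .handle = handle, .cookie = cookie },
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(req),
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&req,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * A later read returns BR_FROZEN_BINDER followed by a
 * struct binder_frozen_state_info; userspace acknowledges it with
 * BC_FREEZE_NOTIFICATION_DONE carrying the same cookie, which lands in
 * binder_freeze_notification_done() above.
 */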
4054
4055
/**
4056
* binder_free_buf() - free the specified buffer
4057
* @proc: binder proc that owns buffer
4058
* @thread: binder thread performing the buffer release
4059
* @buffer: buffer to be freed
4060
* @is_failure: failed to send transaction
4061
*
4062
* If the buffer is for an async transaction, enqueue the next async
4063
* transaction from the node.
4064
*
4065
* Cleanup the buffer and free it.
4066
*/
4067
static void
4068
binder_free_buf(struct binder_proc *proc,
4069
struct binder_thread *thread,
4070
struct binder_buffer *buffer, bool is_failure)
4071
{
4072
binder_inner_proc_lock(proc);
4073
if (buffer->transaction) {
4074
buffer->transaction->buffer = NULL;
4075
buffer->transaction = NULL;
4076
}
4077
binder_inner_proc_unlock(proc);
4078
if (buffer->async_transaction && buffer->target_node) {
4079
struct binder_node *buf_node;
4080
struct binder_work *w;
4081
4082
buf_node = buffer->target_node;
4083
binder_node_inner_lock(buf_node);
4084
BUG_ON(!buf_node->has_async_transaction);
4085
BUG_ON(buf_node->proc != proc);
4086
w = binder_dequeue_work_head_ilocked(
4087
&buf_node->async_todo);
4088
if (!w) {
4089
buf_node->has_async_transaction = false;
4090
} else {
4091
binder_enqueue_work_ilocked(
4092
w, &proc->todo);
4093
binder_wakeup_proc_ilocked(proc);
4094
}
4095
binder_node_inner_unlock(buf_node);
4096
}
4097
trace_binder_transaction_buffer_release(buffer);
4098
binder_release_entire_buffer(proc, thread, buffer, is_failure);
4099
binder_alloc_free_buf(&proc->alloc, buffer);
4100
}
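/*
 * Illustrative sketch only (comment, not driver code): how userspace is
 * expected to hand a consumed transaction buffer back so that the
 * BC_FREE_BUFFER path below ends up in binder_free_buf().  "fd" is assumed
 * to be an open binder descriptor and "data_ptr" the data.ptr.buffer value
 * delivered with the corresponding BR_TRANSACTION or BR_REPLY.
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t ptr;
 *	} __attribute__((packed)) free_cmd = {
 *		.cmd = BC_FREE_BUFFER,
 *		.ptr = data_ptr,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(free_cmd),
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&free_cmd,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */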
4101
4102
static int binder_thread_write(struct binder_proc *proc,
4103
struct binder_thread *thread,
4104
binder_uintptr_t binder_buffer, size_t size,
4105
binder_size_t *consumed)
4106
{
4107
uint32_t cmd;
4108
struct binder_context *context = proc->context;
4109
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4110
void __user *ptr = buffer + *consumed;
4111
void __user *end = buffer + size;
4112
4113
while (ptr < end && thread->return_error.cmd == BR_OK) {
4114
int ret;
4115
4116
if (get_user(cmd, (uint32_t __user *)ptr))
4117
return -EFAULT;
4118
ptr += sizeof(uint32_t);
4119
trace_binder_command(cmd);
4120
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4121
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4122
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4123
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4124
}
4125
switch (cmd) {
4126
case BC_INCREFS:
4127
case BC_ACQUIRE:
4128
case BC_RELEASE:
4129
case BC_DECREFS: {
4130
uint32_t target;
4131
const char *debug_string;
4132
bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4133
bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4134
struct binder_ref_data rdata;
4135
4136
if (get_user(target, (uint32_t __user *)ptr))
4137
return -EFAULT;
4138
4139
ptr += sizeof(uint32_t);
4140
ret = -1;
4141
if (increment && !target) {
4142
struct binder_node *ctx_mgr_node;
4143
4144
mutex_lock(&context->context_mgr_node_lock);
4145
ctx_mgr_node = context->binder_context_mgr_node;
4146
if (ctx_mgr_node) {
4147
if (ctx_mgr_node->proc == proc) {
4148
binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4149
proc->pid, thread->pid);
4150
mutex_unlock(&context->context_mgr_node_lock);
4151
return -EINVAL;
4152
}
4153
ret = binder_inc_ref_for_node(
4154
proc, ctx_mgr_node,
4155
strong, NULL, &rdata);
4156
}
4157
mutex_unlock(&context->context_mgr_node_lock);
4158
}
4159
if (ret)
4160
ret = binder_update_ref_for_handle(
4161
proc, target, increment, strong,
4162
&rdata);
4163
if (!ret && rdata.desc != target) {
4164
binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4165
proc->pid, thread->pid,
4166
target, rdata.desc);
4167
}
4168
switch (cmd) {
4169
case BC_INCREFS:
4170
debug_string = "IncRefs";
4171
break;
4172
case BC_ACQUIRE:
4173
debug_string = "Acquire";
4174
break;
4175
case BC_RELEASE:
4176
debug_string = "Release";
4177
break;
4178
case BC_DECREFS:
4179
default:
4180
debug_string = "DecRefs";
4181
break;
4182
}
4183
if (ret) {
4184
binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4185
proc->pid, thread->pid, debug_string,
4186
strong, target, ret);
4187
break;
4188
}
4189
binder_debug(BINDER_DEBUG_USER_REFS,
4190
"%d:%d %s ref %d desc %d s %d w %d\n",
4191
proc->pid, thread->pid, debug_string,
4192
rdata.debug_id, rdata.desc, rdata.strong,
4193
rdata.weak);
4194
break;
4195
}
4196
case BC_INCREFS_DONE:
4197
case BC_ACQUIRE_DONE: {
4198
binder_uintptr_t node_ptr;
4199
binder_uintptr_t cookie;
4200
struct binder_node *node;
4201
bool free_node;
4202
4203
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4204
return -EFAULT;
4205
ptr += sizeof(binder_uintptr_t);
4206
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4207
return -EFAULT;
4208
ptr += sizeof(binder_uintptr_t);
4209
node = binder_get_node(proc, node_ptr);
4210
if (node == NULL) {
4211
binder_user_error("%d:%d %s u%016llx no match\n",
4212
proc->pid, thread->pid,
4213
cmd == BC_INCREFS_DONE ?
4214
"BC_INCREFS_DONE" :
4215
"BC_ACQUIRE_DONE",
4216
(u64)node_ptr);
4217
break;
4218
}
4219
if (cookie != node->cookie) {
4220
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4221
proc->pid, thread->pid,
4222
cmd == BC_INCREFS_DONE ?
4223
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4224
(u64)node_ptr, node->debug_id,
4225
(u64)cookie, (u64)node->cookie);
4226
binder_put_node(node);
4227
break;
4228
}
4229
binder_node_inner_lock(node);
4230
if (cmd == BC_ACQUIRE_DONE) {
4231
if (node->pending_strong_ref == 0) {
4232
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4233
proc->pid, thread->pid,
4234
node->debug_id);
4235
binder_node_inner_unlock(node);
4236
binder_put_node(node);
4237
break;
4238
}
4239
node->pending_strong_ref = 0;
4240
} else {
4241
if (node->pending_weak_ref == 0) {
4242
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4243
proc->pid, thread->pid,
4244
node->debug_id);
4245
binder_node_inner_unlock(node);
4246
binder_put_node(node);
4247
break;
4248
}
4249
node->pending_weak_ref = 0;
4250
}
4251
free_node = binder_dec_node_nilocked(node,
4252
cmd == BC_ACQUIRE_DONE, 0);
4253
WARN_ON(free_node);
4254
binder_debug(BINDER_DEBUG_USER_REFS,
4255
"%d:%d %s node %d ls %d lw %d tr %d\n",
4256
proc->pid, thread->pid,
4257
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4258
node->debug_id, node->local_strong_refs,
4259
node->local_weak_refs, node->tmp_refs);
4260
binder_node_inner_unlock(node);
4261
binder_put_node(node);
4262
break;
4263
}
4264
case BC_ATTEMPT_ACQUIRE:
4265
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4266
return -EINVAL;
4267
case BC_ACQUIRE_RESULT:
4268
pr_err("BC_ACQUIRE_RESULT not supported\n");
4269
return -EINVAL;
4270
4271
case BC_FREE_BUFFER: {
4272
binder_uintptr_t data_ptr;
4273
struct binder_buffer *buffer;
4274
4275
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4276
return -EFAULT;
4277
ptr += sizeof(binder_uintptr_t);
4278
4279
buffer = binder_alloc_prepare_to_free(&proc->alloc,
4280
data_ptr);
4281
if (IS_ERR_OR_NULL(buffer)) {
4282
if (PTR_ERR(buffer) == -EPERM) {
4283
binder_user_error(
4284
"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4285
proc->pid, thread->pid,
4286
(unsigned long)data_ptr - proc->alloc.vm_start);
4287
} else {
4288
binder_user_error(
4289
"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4290
proc->pid, thread->pid,
4291
(unsigned long)data_ptr - proc->alloc.vm_start);
4292
}
4293
break;
4294
}
4295
binder_debug(BINDER_DEBUG_FREE_BUFFER,
4296
"%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4297
proc->pid, thread->pid,
4298
(unsigned long)data_ptr - proc->alloc.vm_start,
4299
buffer->debug_id,
4300
buffer->transaction ? "active" : "finished");
4301
binder_free_buf(proc, thread, buffer, false);
4302
break;
4303
}
4304
4305
case BC_TRANSACTION_SG:
4306
case BC_REPLY_SG: {
4307
struct binder_transaction_data_sg tr;
4308
4309
if (copy_from_user(&tr, ptr, sizeof(tr)))
4310
return -EFAULT;
4311
ptr += sizeof(tr);
4312
binder_transaction(proc, thread, &tr.transaction_data,
4313
cmd == BC_REPLY_SG, tr.buffers_size);
4314
break;
4315
}
4316
case BC_TRANSACTION:
4317
case BC_REPLY: {
4318
struct binder_transaction_data tr;
4319
4320
if (copy_from_user(&tr, ptr, sizeof(tr)))
4321
return -EFAULT;
4322
ptr += sizeof(tr);
4323
binder_transaction(proc, thread, &tr,
4324
cmd == BC_REPLY, 0);
4325
break;
4326
}
4327
4328
case BC_REGISTER_LOOPER:
4329
binder_debug(BINDER_DEBUG_THREADS,
4330
"%d:%d BC_REGISTER_LOOPER\n",
4331
proc->pid, thread->pid);
4332
binder_inner_proc_lock(proc);
4333
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4334
thread->looper |= BINDER_LOOPER_STATE_INVALID;
4335
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4336
proc->pid, thread->pid);
4337
} else if (proc->requested_threads == 0) {
4338
thread->looper |= BINDER_LOOPER_STATE_INVALID;
4339
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4340
proc->pid, thread->pid);
4341
} else {
4342
proc->requested_threads--;
4343
proc->requested_threads_started++;
4344
}
4345
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4346
binder_inner_proc_unlock(proc);
4347
break;
4348
case BC_ENTER_LOOPER:
4349
binder_debug(BINDER_DEBUG_THREADS,
4350
"%d:%d BC_ENTER_LOOPER\n",
4351
proc->pid, thread->pid);
4352
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4353
thread->looper |= BINDER_LOOPER_STATE_INVALID;
4354
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4355
proc->pid, thread->pid);
4356
}
4357
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4358
break;
4359
case BC_EXIT_LOOPER:
4360
binder_debug(BINDER_DEBUG_THREADS,
4361
"%d:%d BC_EXIT_LOOPER\n",
4362
proc->pid, thread->pid);
4363
thread->looper |= BINDER_LOOPER_STATE_EXITED;
4364
break;
4365
4366
case BC_REQUEST_DEATH_NOTIFICATION:
4367
case BC_CLEAR_DEATH_NOTIFICATION: {
4368
uint32_t target;
4369
binder_uintptr_t cookie;
4370
struct binder_ref *ref;
4371
struct binder_ref_death *death = NULL;
4372
4373
if (get_user(target, (uint32_t __user *)ptr))
4374
return -EFAULT;
4375
ptr += sizeof(uint32_t);
4376
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4377
return -EFAULT;
4378
ptr += sizeof(binder_uintptr_t);
4379
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4380
/*
4381
* Allocate memory for death notification
4382
* before taking lock
4383
*/
4384
death = kzalloc(sizeof(*death), GFP_KERNEL);
4385
if (death == NULL) {
4386
WARN_ON(thread->return_error.cmd !=
4387
BR_OK);
4388
thread->return_error.cmd = BR_ERROR;
4389
binder_enqueue_thread_work(
4390
thread,
4391
&thread->return_error.work);
4392
binder_debug(
4393
BINDER_DEBUG_FAILED_TRANSACTION,
4394
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4395
proc->pid, thread->pid);
4396
break;
4397
}
4398
}
4399
binder_proc_lock(proc);
4400
ref = binder_get_ref_olocked(proc, target, false);
4401
if (ref == NULL) {
4402
binder_user_error("%d:%d %s invalid ref %d\n",
4403
proc->pid, thread->pid,
4404
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4405
"BC_REQUEST_DEATH_NOTIFICATION" :
4406
"BC_CLEAR_DEATH_NOTIFICATION",
4407
target);
4408
binder_proc_unlock(proc);
4409
kfree(death);
4410
break;
4411
}
4412
4413
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4414
"%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4415
proc->pid, thread->pid,
4416
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4417
"BC_REQUEST_DEATH_NOTIFICATION" :
4418
"BC_CLEAR_DEATH_NOTIFICATION",
4419
(u64)cookie, ref->data.debug_id,
4420
ref->data.desc, ref->data.strong,
4421
ref->data.weak, ref->node->debug_id);
4422
4423
binder_node_lock(ref->node);
4424
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4425
if (ref->death) {
4426
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4427
proc->pid, thread->pid);
4428
binder_node_unlock(ref->node);
4429
binder_proc_unlock(proc);
4430
kfree(death);
4431
break;
4432
}
4433
binder_stats_created(BINDER_STAT_DEATH);
4434
INIT_LIST_HEAD(&death->work.entry);
4435
death->cookie = cookie;
4436
ref->death = death;
4437
if (ref->node->proc == NULL) {
4438
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4439
4440
binder_inner_proc_lock(proc);
4441
binder_enqueue_work_ilocked(
4442
&ref->death->work, &proc->todo);
4443
binder_wakeup_proc_ilocked(proc);
4444
binder_inner_proc_unlock(proc);
4445
}
4446
} else {
4447
if (ref->death == NULL) {
4448
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4449
proc->pid, thread->pid);
4450
binder_node_unlock(ref->node);
4451
binder_proc_unlock(proc);
4452
break;
4453
}
4454
death = ref->death;
4455
if (death->cookie != cookie) {
4456
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4457
proc->pid, thread->pid,
4458
(u64)death->cookie,
4459
(u64)cookie);
4460
binder_node_unlock(ref->node);
4461
binder_proc_unlock(proc);
4462
break;
4463
}
4464
ref->death = NULL;
4465
binder_inner_proc_lock(proc);
4466
if (list_empty(&death->work.entry)) {
4467
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4468
if (thread->looper &
4469
(BINDER_LOOPER_STATE_REGISTERED |
4470
BINDER_LOOPER_STATE_ENTERED))
4471
binder_enqueue_thread_work_ilocked(
4472
thread,
4473
&death->work);
4474
else {
4475
binder_enqueue_work_ilocked(
4476
&death->work,
4477
&proc->todo);
4478
binder_wakeup_proc_ilocked(
4479
proc);
4480
}
4481
} else {
4482
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4483
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4484
}
4485
binder_inner_proc_unlock(proc);
4486
}
4487
binder_node_unlock(ref->node);
4488
binder_proc_unlock(proc);
4489
} break;
4490
case BC_DEAD_BINDER_DONE: {
4491
struct binder_work *w;
4492
binder_uintptr_t cookie;
4493
struct binder_ref_death *death = NULL;
4494
4495
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4496
return -EFAULT;
4497
4498
ptr += sizeof(cookie);
4499
binder_inner_proc_lock(proc);
4500
list_for_each_entry(w, &proc->delivered_death,
4501
entry) {
4502
struct binder_ref_death *tmp_death =
4503
container_of(w,
4504
struct binder_ref_death,
4505
work);
4506
4507
if (tmp_death->cookie == cookie) {
4508
death = tmp_death;
4509
break;
4510
}
4511
}
4512
binder_debug(BINDER_DEBUG_DEAD_BINDER,
4513
"%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4514
proc->pid, thread->pid, (u64)cookie,
4515
death);
4516
if (death == NULL) {
4517
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4518
proc->pid, thread->pid, (u64)cookie);
4519
binder_inner_proc_unlock(proc);
4520
break;
4521
}
4522
binder_dequeue_work_ilocked(&death->work);
4523
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4524
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4525
if (thread->looper &
4526
(BINDER_LOOPER_STATE_REGISTERED |
4527
BINDER_LOOPER_STATE_ENTERED))
4528
binder_enqueue_thread_work_ilocked(
4529
thread, &death->work);
4530
else {
4531
binder_enqueue_work_ilocked(
4532
&death->work,
4533
&proc->todo);
4534
binder_wakeup_proc_ilocked(proc);
4535
}
4536
}
4537
binder_inner_proc_unlock(proc);
4538
} break;
4539
4540
case BC_REQUEST_FREEZE_NOTIFICATION: {
4541
struct binder_handle_cookie handle_cookie;
4542
int error;
4543
4544
if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4545
return -EFAULT;
4546
ptr += sizeof(handle_cookie);
4547
error = binder_request_freeze_notification(proc, thread,
4548
&handle_cookie);
4549
if (error)
4550
return error;
4551
} break;
4552
4553
case BC_CLEAR_FREEZE_NOTIFICATION: {
4554
struct binder_handle_cookie handle_cookie;
4555
int error;
4556
4557
if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4558
return -EFAULT;
4559
ptr += sizeof(handle_cookie);
4560
error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4561
if (error)
4562
return error;
4563
} break;
4564
4565
case BC_FREEZE_NOTIFICATION_DONE: {
4566
binder_uintptr_t cookie;
4567
int error;
4568
4569
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4570
return -EFAULT;
4571
4572
ptr += sizeof(cookie);
4573
error = binder_freeze_notification_done(proc, thread, cookie);
4574
if (error)
4575
return error;
4576
} break;
4577
4578
default:
4579
pr_err("%d:%d unknown command %u\n",
4580
proc->pid, thread->pid, cmd);
4581
return -EINVAL;
4582
}
4583
*consumed = ptr - buffer;
4584
}
4585
return 0;
4586
}
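/*
 * Illustrative sketch only (comment, not driver code): the minimal command
 * stream a userspace thread writes before servicing work, matching the
 * BC_ENTER_LOOPER handling above.  "fd" is assumed to be an open binder
 * descriptor.
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * Threads spawned later in response to BR_SPAWN_LOOPER announce themselves
 * with BC_REGISTER_LOOPER instead, as the checks above require.
 */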
4587
4588
static void binder_stat_br(struct binder_proc *proc,
4589
struct binder_thread *thread, uint32_t cmd)
4590
{
4591
trace_binder_return(cmd);
4592
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4593
atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4594
atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4595
atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4596
}
4597
}
4598
4599
static int binder_put_node_cmd(struct binder_proc *proc,
4600
struct binder_thread *thread,
4601
void __user **ptrp,
4602
binder_uintptr_t node_ptr,
4603
binder_uintptr_t node_cookie,
4604
int node_debug_id,
4605
uint32_t cmd, const char *cmd_name)
4606
{
4607
void __user *ptr = *ptrp;
4608
4609
if (put_user(cmd, (uint32_t __user *)ptr))
4610
return -EFAULT;
4611
ptr += sizeof(uint32_t);
4612
4613
if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4614
return -EFAULT;
4615
ptr += sizeof(binder_uintptr_t);
4616
4617
if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4618
return -EFAULT;
4619
ptr += sizeof(binder_uintptr_t);
4620
4621
binder_stat_br(proc, thread, cmd);
4622
binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4623
proc->pid, thread->pid, cmd_name, node_debug_id,
4624
(u64)node_ptr, (u64)node_cookie);
4625
4626
*ptrp = ptr;
4627
return 0;
4628
}
4629
4630
static int binder_wait_for_work(struct binder_thread *thread,
4631
bool do_proc_work)
4632
{
4633
DEFINE_WAIT(wait);
4634
struct binder_proc *proc = thread->proc;
4635
int ret = 0;
4636
4637
binder_inner_proc_lock(proc);
4638
for (;;) {
4639
prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4640
if (binder_has_work_ilocked(thread, do_proc_work))
4641
break;
4642
if (do_proc_work)
4643
list_add(&thread->waiting_thread_node,
4644
&proc->waiting_threads);
4645
binder_inner_proc_unlock(proc);
4646
schedule();
4647
binder_inner_proc_lock(proc);
4648
list_del_init(&thread->waiting_thread_node);
4649
if (signal_pending(current)) {
4650
ret = -EINTR;
4651
break;
4652
}
4653
}
4654
finish_wait(&thread->wait, &wait);
4655
binder_inner_proc_unlock(proc);
4656
4657
return ret;
4658
}
4659
4660
/**
4661
* binder_apply_fd_fixups() - finish fd translation
4662
* @proc: binder_proc associated @t->buffer
4663
* @t: binder transaction with list of fd fixups
4664
*
4665
* Now that we are in the context of the transaction target
4666
* process, we can allocate and install fds. Process the
4667
* list of fds to translate and fixup the buffer with the
4668
* new fds first and only then install the files.
4669
*
4670
* If we fail to allocate an fd, skip the install and release
4671
* any fds that have already been allocated.
4672
*
4673
* Return: 0 on success, a negative errno code on failure.
4674
*/
4675
static int binder_apply_fd_fixups(struct binder_proc *proc,
4676
struct binder_transaction *t)
4677
{
4678
struct binder_txn_fd_fixup *fixup, *tmp;
4679
int ret = 0;
4680
4681
list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4682
int fd = get_unused_fd_flags(O_CLOEXEC);
4683
4684
if (fd < 0) {
4685
binder_debug(BINDER_DEBUG_TRANSACTION,
4686
"failed fd fixup txn %d fd %d\n",
4687
t->debug_id, fd);
4688
ret = -ENOMEM;
4689
goto err;
4690
}
4691
binder_debug(BINDER_DEBUG_TRANSACTION,
4692
"fd fixup txn %d fd %d\n",
4693
t->debug_id, fd);
4694
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4695
fixup->target_fd = fd;
4696
if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4697
fixup->offset, &fd,
4698
sizeof(u32))) {
4699
ret = -EINVAL;
4700
goto err;
4701
}
4702
}
4703
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4704
fd_install(fixup->target_fd, fixup->file);
4705
list_del(&fixup->fixup_entry);
4706
kfree(fixup);
4707
}
4708
4709
return ret;
4710
4711
err:
4712
binder_free_txn_fixups(t);
4713
return ret;
4714
}
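/*
 * The reserve-then-publish idiom used above, shown in isolation (sketch
 * only; "filp" is assumed to be a struct file reference the caller already
 * holds, and write_fd_somewhere() is a hypothetical stand-in for recording
 * the descriptor number):
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	if (fd < 0)
 *		return fd;		(nothing userspace-visible yet)
 *	if (write_fd_somewhere(fd))
 *		put_unused_fd(fd);	(undo the reservation on error)
 *	else
 *		fd_install(fd, filp);	(publish: the fd is now live)
 *
 * binder defers the fd_install() calls until every fixup has been copied
 * into the buffer, so a failure part-way through never publishes a live
 * descriptor into the target process.
 */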
4715
4716
static int binder_thread_read(struct binder_proc *proc,
4717
struct binder_thread *thread,
4718
binder_uintptr_t binder_buffer, size_t size,
4719
binder_size_t *consumed, int non_block)
4720
{
4721
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4722
void __user *ptr = buffer + *consumed;
4723
void __user *end = buffer + size;
4724
4725
int ret = 0;
4726
int wait_for_proc_work;
4727
4728
if (*consumed == 0) {
4729
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4730
return -EFAULT;
4731
ptr += sizeof(uint32_t);
4732
}
4733
4734
retry:
4735
binder_inner_proc_lock(proc);
4736
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4737
binder_inner_proc_unlock(proc);
4738
4739
thread->looper |= BINDER_LOOPER_STATE_WAITING;
4740
4741
trace_binder_wait_for_work(wait_for_proc_work,
4742
!!thread->transaction_stack,
4743
!binder_worklist_empty(proc, &thread->todo));
4744
if (wait_for_proc_work) {
4745
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4746
BINDER_LOOPER_STATE_ENTERED))) {
4747
binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4748
proc->pid, thread->pid, thread->looper);
4749
wait_event_interruptible(binder_user_error_wait,
4750
binder_stop_on_user_error < 2);
4751
}
4752
binder_set_nice(proc->default_priority);
4753
}
4754
4755
if (non_block) {
4756
if (!binder_has_work(thread, wait_for_proc_work))
4757
ret = -EAGAIN;
4758
} else {
4759
ret = binder_wait_for_work(thread, wait_for_proc_work);
4760
}
4761
4762
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4763
4764
if (ret)
4765
return ret;
4766
4767
while (1) {
4768
uint32_t cmd;
4769
struct binder_transaction_data_secctx tr;
4770
struct binder_transaction_data *trd = &tr.transaction_data;
4771
struct binder_work *w = NULL;
4772
struct list_head *list = NULL;
4773
struct binder_transaction *t = NULL;
4774
struct binder_thread *t_from;
4775
size_t trsize = sizeof(*trd);
4776
4777
binder_inner_proc_lock(proc);
4778
if (!binder_worklist_empty_ilocked(&thread->todo))
4779
list = &thread->todo;
4780
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4781
wait_for_proc_work)
4782
list = &proc->todo;
4783
else {
4784
binder_inner_proc_unlock(proc);
4785
4786
/* no data added */
4787
if (ptr - buffer == 4 && !thread->looper_need_return)
4788
goto retry;
4789
break;
4790
}
4791
4792
if (end - ptr < sizeof(tr) + 4) {
4793
binder_inner_proc_unlock(proc);
4794
break;
4795
}
4796
w = binder_dequeue_work_head_ilocked(list);
4797
if (binder_worklist_empty_ilocked(&thread->todo))
4798
thread->process_todo = false;
4799
4800
switch (w->type) {
4801
case BINDER_WORK_TRANSACTION: {
4802
binder_inner_proc_unlock(proc);
4803
t = container_of(w, struct binder_transaction, work);
4804
} break;
4805
case BINDER_WORK_RETURN_ERROR: {
4806
struct binder_error *e = container_of(
4807
w, struct binder_error, work);
4808
4809
WARN_ON(e->cmd == BR_OK);
4810
binder_inner_proc_unlock(proc);
4811
if (put_user(e->cmd, (uint32_t __user *)ptr))
4812
return -EFAULT;
4813
cmd = e->cmd;
4814
e->cmd = BR_OK;
4815
ptr += sizeof(uint32_t);
4816
4817
binder_stat_br(proc, thread, cmd);
4818
} break;
4819
case BINDER_WORK_TRANSACTION_COMPLETE:
4820
case BINDER_WORK_TRANSACTION_PENDING:
4821
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4822
if (proc->oneway_spam_detection_enabled &&
4823
w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4824
cmd = BR_ONEWAY_SPAM_SUSPECT;
4825
else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4826
cmd = BR_TRANSACTION_PENDING_FROZEN;
4827
else
4828
cmd = BR_TRANSACTION_COMPLETE;
4829
binder_inner_proc_unlock(proc);
4830
kfree(w);
4831
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4832
if (put_user(cmd, (uint32_t __user *)ptr))
4833
return -EFAULT;
4834
ptr += sizeof(uint32_t);
4835
4836
binder_stat_br(proc, thread, cmd);
4837
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4838
"%d:%d BR_TRANSACTION_COMPLETE\n",
4839
proc->pid, thread->pid);
4840
} break;
4841
case BINDER_WORK_NODE: {
4842
struct binder_node *node = container_of(w, struct binder_node, work);
4843
int strong, weak;
4844
binder_uintptr_t node_ptr = node->ptr;
4845
binder_uintptr_t node_cookie = node->cookie;
4846
int node_debug_id = node->debug_id;
4847
int has_weak_ref;
4848
int has_strong_ref;
4849
void __user *orig_ptr = ptr;
4850
4851
BUG_ON(proc != node->proc);
4852
strong = node->internal_strong_refs ||
4853
node->local_strong_refs;
4854
weak = !hlist_empty(&node->refs) ||
4855
node->local_weak_refs ||
4856
node->tmp_refs || strong;
4857
has_strong_ref = node->has_strong_ref;
4858
has_weak_ref = node->has_weak_ref;
4859
4860
if (weak && !has_weak_ref) {
4861
node->has_weak_ref = 1;
4862
node->pending_weak_ref = 1;
4863
node->local_weak_refs++;
4864
}
4865
if (strong && !has_strong_ref) {
4866
node->has_strong_ref = 1;
4867
node->pending_strong_ref = 1;
4868
node->local_strong_refs++;
4869
}
4870
if (!strong && has_strong_ref)
4871
node->has_strong_ref = 0;
4872
if (!weak && has_weak_ref)
4873
node->has_weak_ref = 0;
4874
if (!weak && !strong) {
4875
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4876
"%d:%d node %d u%016llx c%016llx deleted\n",
4877
proc->pid, thread->pid,
4878
node_debug_id,
4879
(u64)node_ptr,
4880
(u64)node_cookie);
4881
rb_erase(&node->rb_node, &proc->nodes);
4882
binder_inner_proc_unlock(proc);
4883
binder_node_lock(node);
4884
/*
4885
* Acquire the node lock before freeing the
4886
* node to serialize with other threads that
4887
* may have been holding the node lock while
4888
* decrementing this node (avoids race where
4889
* this thread frees while the other thread
4890
* is unlocking the node after the final
4891
* decrement)
4892
*/
4893
binder_node_unlock(node);
4894
binder_free_node(node);
4895
} else
4896
binder_inner_proc_unlock(proc);
4897
4898
if (weak && !has_weak_ref)
4899
ret = binder_put_node_cmd(
4900
proc, thread, &ptr, node_ptr,
4901
node_cookie, node_debug_id,
4902
BR_INCREFS, "BR_INCREFS");
4903
if (!ret && strong && !has_strong_ref)
4904
ret = binder_put_node_cmd(
4905
proc, thread, &ptr, node_ptr,
4906
node_cookie, node_debug_id,
4907
BR_ACQUIRE, "BR_ACQUIRE");
4908
if (!ret && !strong && has_strong_ref)
4909
ret = binder_put_node_cmd(
4910
proc, thread, &ptr, node_ptr,
4911
node_cookie, node_debug_id,
4912
BR_RELEASE, "BR_RELEASE");
4913
if (!ret && !weak && has_weak_ref)
4914
ret = binder_put_node_cmd(
4915
proc, thread, &ptr, node_ptr,
4916
node_cookie, node_debug_id,
4917
BR_DECREFS, "BR_DECREFS");
4918
if (orig_ptr == ptr)
4919
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4920
"%d:%d node %d u%016llx c%016llx state unchanged\n",
4921
proc->pid, thread->pid,
4922
node_debug_id,
4923
(u64)node_ptr,
4924
(u64)node_cookie);
4925
if (ret)
4926
return ret;
4927
} break;
4928
case BINDER_WORK_DEAD_BINDER:
4929
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4930
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4931
struct binder_ref_death *death;
4932
uint32_t cmd;
4933
binder_uintptr_t cookie;
4934
4935
death = container_of(w, struct binder_ref_death, work);
4936
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4937
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4938
else
4939
cmd = BR_DEAD_BINDER;
4940
cookie = death->cookie;
4941
4942
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4943
"%d:%d %s %016llx\n",
4944
proc->pid, thread->pid,
4945
cmd == BR_DEAD_BINDER ?
4946
"BR_DEAD_BINDER" :
4947
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
4948
(u64)cookie);
4949
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4950
binder_inner_proc_unlock(proc);
4951
kfree(death);
4952
binder_stats_deleted(BINDER_STAT_DEATH);
4953
} else {
4954
binder_enqueue_work_ilocked(
4955
w, &proc->delivered_death);
4956
binder_inner_proc_unlock(proc);
4957
}
4958
if (put_user(cmd, (uint32_t __user *)ptr))
4959
return -EFAULT;
4960
ptr += sizeof(uint32_t);
4961
if (put_user(cookie,
4962
(binder_uintptr_t __user *)ptr))
4963
return -EFAULT;
4964
ptr += sizeof(binder_uintptr_t);
4965
binder_stat_br(proc, thread, cmd);
4966
if (cmd == BR_DEAD_BINDER)
4967
goto done; /* DEAD_BINDER notifications can cause transactions */
4968
} break;
4969
4970
case BINDER_WORK_FROZEN_BINDER: {
4971
struct binder_ref_freeze *freeze;
4972
struct binder_frozen_state_info info;
4973
4974
memset(&info, 0, sizeof(info));
4975
freeze = container_of(w, struct binder_ref_freeze, work);
4976
info.is_frozen = freeze->is_frozen;
4977
info.cookie = freeze->cookie;
4978
freeze->sent = true;
4979
binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4980
binder_inner_proc_unlock(proc);
4981
4982
if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4983
return -EFAULT;
4984
ptr += sizeof(uint32_t);
4985
if (copy_to_user(ptr, &info, sizeof(info)))
4986
return -EFAULT;
4987
ptr += sizeof(info);
4988
binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4989
goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4990
} break;
4991
4992
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4993
struct binder_ref_freeze *freeze =
4994
container_of(w, struct binder_ref_freeze, work);
4995
binder_uintptr_t cookie = freeze->cookie;
4996
4997
binder_inner_proc_unlock(proc);
4998
kfree(freeze);
4999
binder_stats_deleted(BINDER_STAT_FREEZE);
5000
if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
5001
return -EFAULT;
5002
ptr += sizeof(uint32_t);
5003
if (put_user(cookie, (binder_uintptr_t __user *)ptr))
5004
return -EFAULT;
5005
ptr += sizeof(binder_uintptr_t);
5006
binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
5007
} break;
5008
5009
default:
5010
binder_inner_proc_unlock(proc);
5011
pr_err("%d:%d: bad work type %d\n",
5012
proc->pid, thread->pid, w->type);
5013
break;
5014
}
5015
5016
if (!t)
5017
continue;
5018
5019
BUG_ON(t->buffer == NULL);
5020
if (t->buffer->target_node) {
5021
struct binder_node *target_node = t->buffer->target_node;
5022
5023
trd->target.ptr = target_node->ptr;
5024
trd->cookie = target_node->cookie;
5025
t->saved_priority = task_nice(current);
5026
if (t->priority < target_node->min_priority &&
5027
!(t->flags & TF_ONE_WAY))
5028
binder_set_nice(t->priority);
5029
else if (!(t->flags & TF_ONE_WAY) ||
5030
t->saved_priority > target_node->min_priority)
5031
binder_set_nice(target_node->min_priority);
5032
cmd = BR_TRANSACTION;
5033
} else {
5034
trd->target.ptr = 0;
5035
trd->cookie = 0;
5036
cmd = BR_REPLY;
5037
}
5038
trd->code = t->code;
5039
trd->flags = t->flags;
5040
trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
5041
5042
t_from = binder_get_txn_from(t);
5043
if (t_from) {
5044
struct task_struct *sender = t_from->proc->tsk;
5045
5046
trd->sender_pid =
5047
task_tgid_nr_ns(sender,
5048
task_active_pid_ns(current));
5049
} else {
5050
trd->sender_pid = 0;
5051
}
5052
5053
ret = binder_apply_fd_fixups(proc, t);
5054
if (ret) {
5055
struct binder_buffer *buffer = t->buffer;
5056
bool oneway = !!(t->flags & TF_ONE_WAY);
5057
int tid = t->debug_id;
5058
5059
if (t_from)
5060
binder_thread_dec_tmpref(t_from);
5061
buffer->transaction = NULL;
5062
binder_cleanup_transaction(t, "fd fixups failed",
5063
BR_FAILED_REPLY);
5064
binder_free_buf(proc, thread, buffer, true);
5065
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5066
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5067
proc->pid, thread->pid,
5068
oneway ? "async " :
5069
(cmd == BR_REPLY ? "reply " : ""),
5070
tid, BR_FAILED_REPLY, ret, __LINE__);
5071
if (cmd == BR_REPLY) {
5072
cmd = BR_FAILED_REPLY;
5073
if (put_user(cmd, (uint32_t __user *)ptr))
5074
return -EFAULT;
5075
ptr += sizeof(uint32_t);
5076
binder_stat_br(proc, thread, cmd);
5077
break;
5078
}
5079
continue;
5080
}
5081
trd->data_size = t->buffer->data_size;
5082
trd->offsets_size = t->buffer->offsets_size;
5083
trd->data.ptr.buffer = t->buffer->user_data;
5084
trd->data.ptr.offsets = trd->data.ptr.buffer +
5085
ALIGN(t->buffer->data_size,
5086
sizeof(void *));
5087
5088
tr.secctx = t->security_ctx;
5089
if (t->security_ctx) {
5090
cmd = BR_TRANSACTION_SEC_CTX;
5091
trsize = sizeof(tr);
5092
}
5093
if (put_user(cmd, (uint32_t __user *)ptr)) {
5094
if (t_from)
5095
binder_thread_dec_tmpref(t_from);
5096
5097
binder_cleanup_transaction(t, "put_user failed",
5098
BR_FAILED_REPLY);
5099
5100
return -EFAULT;
5101
}
5102
ptr += sizeof(uint32_t);
5103
if (copy_to_user(ptr, &tr, trsize)) {
5104
if (t_from)
5105
binder_thread_dec_tmpref(t_from);
5106
5107
binder_cleanup_transaction(t, "copy_to_user failed",
5108
BR_FAILED_REPLY);
5109
5110
return -EFAULT;
5111
}
5112
ptr += trsize;
5113
5114
trace_binder_transaction_received(t);
5115
binder_stat_br(proc, thread, cmd);
5116
binder_debug(BINDER_DEBUG_TRANSACTION,
5117
"%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5118
proc->pid, thread->pid,
5119
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5120
(cmd == BR_TRANSACTION_SEC_CTX) ?
5121
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5122
t->debug_id, t_from ? t_from->proc->pid : 0,
5123
t_from ? t_from->pid : 0, cmd,
5124
t->buffer->data_size, t->buffer->offsets_size);
5125
5126
if (t_from)
5127
binder_thread_dec_tmpref(t_from);
5128
t->buffer->allow_user_free = 1;
5129
if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5130
binder_inner_proc_lock(thread->proc);
5131
t->to_parent = thread->transaction_stack;
5132
t->to_thread = thread;
5133
thread->transaction_stack = t;
5134
binder_inner_proc_unlock(thread->proc);
5135
} else {
5136
binder_free_transaction(t);
5137
}
5138
break;
5139
}
5140
5141
done:
5142
5143
*consumed = ptr - buffer;
5144
binder_inner_proc_lock(proc);
5145
if (proc->requested_threads == 0 &&
5146
list_empty(&thread->proc->waiting_threads) &&
5147
proc->requested_threads_started < proc->max_threads &&
5148
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
/* the user-space code fails to spawn a new thread if we leave this out */) {
5151
proc->requested_threads++;
5152
binder_inner_proc_unlock(proc);
5153
binder_debug(BINDER_DEBUG_THREADS,
5154
"%d:%d BR_SPAWN_LOOPER\n",
5155
proc->pid, thread->pid);
5156
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5157
return -EFAULT;
5158
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5159
} else
5160
binder_inner_proc_unlock(proc);
5161
return 0;
5162
}
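/*
 * Illustrative sketch only (comment, not driver code): how a userspace
 * looper consumes the return stream produced above.  Each item starts with
 * a u32 BR_* code, optionally followed by a payload (for example a
 * struct binder_transaction_data after BR_TRANSACTION, or a
 * struct binder_transaction_data_secctx after BR_TRANSACTION_SEC_CTX).
 * "fd" and a byte array "buf" are assumed.
 *
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(buf),
 *		.read_buffer = (binder_uintptr_t)(uintptr_t)buf,
 *	};
 *
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;
 *	(walk bwr.read_consumed bytes of buf, dispatching on each BR_* code)
 */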
5163
5164
static void binder_release_work(struct binder_proc *proc,
5165
struct list_head *list)
5166
{
5167
struct binder_work *w;
5168
enum binder_work_type wtype;
5169
5170
while (1) {
5171
binder_inner_proc_lock(proc);
5172
w = binder_dequeue_work_head_ilocked(list);
5173
wtype = w ? w->type : 0;
5174
binder_inner_proc_unlock(proc);
5175
if (!w)
5176
return;
5177
5178
switch (wtype) {
5179
case BINDER_WORK_TRANSACTION: {
5180
struct binder_transaction *t;
5181
5182
t = container_of(w, struct binder_transaction, work);
5183
5184
binder_cleanup_transaction(t, "process died.",
5185
BR_DEAD_REPLY);
5186
} break;
5187
case BINDER_WORK_RETURN_ERROR: {
5188
struct binder_error *e = container_of(
5189
w, struct binder_error, work);
5190
5191
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5192
"undelivered TRANSACTION_ERROR: %u\n",
5193
e->cmd);
5194
} break;
5195
case BINDER_WORK_TRANSACTION_PENDING:
5196
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5197
case BINDER_WORK_TRANSACTION_COMPLETE: {
5198
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5199
"undelivered TRANSACTION_COMPLETE\n");
5200
kfree(w);
5201
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5202
} break;
5203
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5204
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5205
struct binder_ref_death *death;
5206
5207
death = container_of(w, struct binder_ref_death, work);
5208
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5209
"undelivered death notification, %016llx\n",
5210
(u64)death->cookie);
5211
kfree(death);
5212
binder_stats_deleted(BINDER_STAT_DEATH);
5213
} break;
5214
case BINDER_WORK_NODE:
5215
break;
5216
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5217
struct binder_ref_freeze *freeze;
5218
5219
freeze = container_of(w, struct binder_ref_freeze, work);
5220
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5221
"undelivered freeze notification, %016llx\n",
5222
(u64)freeze->cookie);
5223
kfree(freeze);
5224
binder_stats_deleted(BINDER_STAT_FREEZE);
5225
} break;
5226
default:
5227
pr_err("unexpected work type, %d, not freed\n",
5228
wtype);
5229
break;
5230
}
5231
}
5232
5233
}
5234
5235
static struct binder_thread *binder_get_thread_ilocked(
5236
struct binder_proc *proc, struct binder_thread *new_thread)
5237
{
5238
struct binder_thread *thread = NULL;
5239
struct rb_node *parent = NULL;
5240
struct rb_node **p = &proc->threads.rb_node;
5241
5242
while (*p) {
5243
parent = *p;
5244
thread = rb_entry(parent, struct binder_thread, rb_node);
5245
5246
if (current->pid < thread->pid)
5247
p = &(*p)->rb_left;
5248
else if (current->pid > thread->pid)
5249
p = &(*p)->rb_right;
5250
else
5251
return thread;
5252
}
5253
if (!new_thread)
5254
return NULL;
5255
thread = new_thread;
5256
binder_stats_created(BINDER_STAT_THREAD);
5257
thread->proc = proc;
5258
thread->pid = current->pid;
5259
atomic_set(&thread->tmp_ref, 0);
5260
init_waitqueue_head(&thread->wait);
5261
INIT_LIST_HEAD(&thread->todo);
5262
rb_link_node(&thread->rb_node, parent, p);
5263
rb_insert_color(&thread->rb_node, &proc->threads);
5264
thread->looper_need_return = true;
5265
thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5266
thread->return_error.cmd = BR_OK;
5267
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5268
thread->reply_error.cmd = BR_OK;
5269
thread->ee.command = BR_OK;
5270
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5271
return thread;
5272
}
5273
5274
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5275
{
5276
struct binder_thread *thread;
5277
struct binder_thread *new_thread;
5278
5279
binder_inner_proc_lock(proc);
5280
thread = binder_get_thread_ilocked(proc, NULL);
5281
binder_inner_proc_unlock(proc);
5282
if (!thread) {
5283
new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5284
if (new_thread == NULL)
5285
return NULL;
5286
binder_inner_proc_lock(proc);
5287
thread = binder_get_thread_ilocked(proc, new_thread);
5288
binder_inner_proc_unlock(proc);
5289
if (thread != new_thread)
5290
kfree(new_thread);
5291
}
5292
return thread;
5293
}
5294
5295
static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		binder_remove_device(device);
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	dbitmap_free(&proc->dmap);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

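/*
 * Detach a thread from its proc and unwind its transaction stack. Each
 * transaction the dying thread was serving loses its to_thread/to_proc
 * links (a failed reply is sent later for the top-most one), while
 * transactions it originated simply drop their from pointer. The
 * __acquire()/__release() calls are sparse-only annotations that keep
 * lock tracking balanced across the hand-over-hand spin_lock loop. If the
 * thread ever polled, wake_up_pollfree() plus synchronize_rcu() ensure no
 * epoll waitqueue entry can outlive thread->wait.
 */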
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

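/*
 * poll() support. The per-thread waitqueue is handed to the poll core
 * here, and BINDER_LOOPER_STATE_POLL is recorded so that
 * binder_thread_release() above knows it must run the wake_up_pollfree()
 * / synchronize_rcu() sequence before the thread struct can be freed.
 */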
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

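/*
 * BINDER_WRITE_READ is the main data path: userspace passes a struct
 * binder_write_read describing an outgoing command buffer and an incoming
 * return buffer, and the consumed counts are copied back so the caller can
 * tell how far each side progressed. A caller would typically drive it
 * roughly like this (illustrative sketch only, not kernel code):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size   = cmds_len,
 *		.read_buffer  = (binder_uintptr_t)returns,
 *		.read_size    = sizeof(returns),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * Writes are processed before reads, and a failed write skips the read so
 * the caller sees a consistent bwr on return.
 */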
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
		return -EFAULT;

	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0)
			goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
out:
	if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
		ret = -EFAULT;
	return ret;
}

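/*
 * BINDER_SET_CONTEXT_MGR(_EXT): installs the context manager node for
 * this binder context. Only one manager may exist per context (-EBUSY
 * otherwise), the caller must pass the LSM check and, if a manager uid
 * was already configured, match it. The new node is pinned with strong
 * and weak references so it persists for the lifetime of the context.
 * The legacy BINDER_SET_CONTEXT_MGR path (see binder_ioctl() below)
 * passes a NULL fbo here.
 */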
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	guard(mutex)(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		return -EBUSY;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		return ret;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			return -EPERM;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node)
		return -ENOMEM;
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5536
struct binder_node_info_for_ref *info)
5537
{
5538
struct binder_node *node;
5539
struct binder_context *context = proc->context;
5540
__u32 handle = info->handle;
5541
5542
if (info->strong_count || info->weak_count || info->reserved1 ||
5543
info->reserved2 || info->reserved3) {
5544
binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5545
proc->pid);
5546
return -EINVAL;
5547
}
5548
5549
/* This ioctl may only be used by the context manager */
5550
mutex_lock(&context->context_mgr_node_lock);
5551
if (!context->binder_context_mgr_node ||
5552
context->binder_context_mgr_node->proc != proc) {
5553
mutex_unlock(&context->context_mgr_node_lock);
5554
return -EPERM;
5555
}
5556
mutex_unlock(&context->context_mgr_node_lock);
5557
5558
node = binder_get_node_from_ref(proc, handle, true, NULL);
5559
if (!node)
5560
return -EINVAL;
5561
5562
info->strong_count = node->local_strong_refs +
5563
node->internal_strong_refs;
5564
info->weak_count = node->local_weak_refs;
5565
5566
binder_put_node(node);
5567
5568
return 0;
5569
}
5570
5571
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5572
struct binder_node_debug_info *info)
5573
{
5574
struct rb_node *n;
5575
binder_uintptr_t ptr = info->ptr;
5576
5577
memset(info, 0, sizeof(*info));
5578
5579
binder_inner_proc_lock(proc);
5580
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5581
struct binder_node *node = rb_entry(n, struct binder_node,
5582
rb_node);
5583
if (node->ptr > ptr) {
5584
info->ptr = node->ptr;
5585
info->cookie = node->cookie;
5586
info->has_strong_ref = node->has_strong_ref;
5587
info->has_weak_ref = node->has_weak_ref;
5588
break;
5589
}
5590
}
5591
binder_inner_proc_unlock(proc);
5592
5593
return 0;
5594
}
5595
5596
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5597
{
5598
struct rb_node *n;
5599
struct binder_thread *thread;
5600
5601
if (proc->outstanding_txns > 0)
5602
return true;
5603
5604
for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5605
thread = rb_entry(n, struct binder_thread, rb_node);
5606
if (thread->transaction_stack)
5607
return true;
5608
}
5609
return false;
5610
}
5611
5612
static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5613
{
5614
struct binder_node *prev = NULL;
5615
struct rb_node *n;
5616
struct binder_ref *ref;
5617
5618
binder_inner_proc_lock(proc);
5619
for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5620
struct binder_node *node;
5621
5622
node = rb_entry(n, struct binder_node, rb_node);
5623
binder_inc_node_tmpref_ilocked(node);
5624
binder_inner_proc_unlock(proc);
5625
if (prev)
5626
binder_put_node(prev);
5627
binder_node_lock(node);
5628
hlist_for_each_entry(ref, &node->refs, node_entry) {
5629
/*
5630
* Need the node lock to synchronize
5631
* with new notification requests and the
5632
* inner lock to synchronize with queued
5633
* freeze notifications.
5634
*/
5635
binder_inner_proc_lock(ref->proc);
5636
if (!ref->freeze) {
5637
binder_inner_proc_unlock(ref->proc);
5638
continue;
5639
}
5640
ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5641
if (list_empty(&ref->freeze->work.entry)) {
5642
ref->freeze->is_frozen = is_frozen;
5643
binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5644
binder_wakeup_proc_ilocked(ref->proc);
5645
} else {
5646
if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5647
ref->freeze->resend = true;
5648
ref->freeze->is_frozen = is_frozen;
5649
}
5650
binder_inner_proc_unlock(ref->proc);
5651
}
5652
prev = node;
5653
binder_node_unlock(node);
5654
binder_inner_proc_lock(proc);
5655
if (proc->is_dead)
5656
break;
5657
}
5658
binder_inner_proc_unlock(proc);
5659
if (prev)
5660
binder_put_node(prev);
5661
}
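/*
 * Core of BINDER_FREEZE for one target proc. Disabling simply clears the
 * frozen state and notifies interested refs via binder_add_freeze_work().
 * Enabling marks the proc frozen first so new synchronous transactions
 * are refused, optionally waits up to timeout_ms for outstanding
 * transactions to drain, and backs the freeze out again (returning
 * -EAGAIN or the wait error) if transactions are still pending.
 */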
static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		binder_add_freeze_work(target_proc, false);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	} else {
		binder_add_freeze_work(target_proc, true);
	}

	return ret;
}

static int binder_ioctl_get_freezer_info(
5715
struct binder_frozen_status_info *info)
5716
{
5717
struct binder_proc *target_proc;
5718
bool found = false;
5719
__u32 txns_pending;
5720
5721
info->sync_recv = 0;
5722
info->async_recv = 0;
5723
5724
mutex_lock(&binder_procs_lock);
5725
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5726
if (target_proc->pid == info->pid) {
5727
found = true;
5728
binder_inner_proc_lock(target_proc);
5729
txns_pending = binder_txns_pending_ilocked(target_proc);
5730
info->sync_recv |= target_proc->sync_recv |
5731
(txns_pending << 1);
5732
info->async_recv |= target_proc->async_recv;
5733
binder_inner_proc_unlock(target_proc);
5734
}
5735
}
5736
mutex_unlock(&binder_procs_lock);
5737
5738
if (!found)
5739
return -EINVAL;
5740
5741
return 0;
5742
}
5743
5744
static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5745
void __user *ubuf)
5746
{
5747
struct binder_extended_error ee;
5748
5749
binder_inner_proc_lock(thread->proc);
5750
ee = thread->ee;
5751
binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5752
binder_inner_proc_unlock(thread->proc);
5753
5754
if (copy_to_user(ubuf, &ee, sizeof(ee)))
5755
return -EFAULT;
5756
5757
return 0;
5758
}
5759
5760
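/*
 * Top-level ioctl dispatcher. Every command first resolves (or creates)
 * the calling binder_thread, then the helpers above do the real work:
 * BINDER_WRITE_READ for the transaction data path, the context-manager,
 * freeze and node-info commands, plus small state toggles such as
 * BINDER_SET_MAX_THREADS and BINDER_ENABLE_ONEWAY_SPAM_DETECTION.
 * thread->looper_need_return is cleared again before returning.
 */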
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5761
{
5762
int ret;
5763
struct binder_proc *proc = filp->private_data;
5764
struct binder_thread *thread;
5765
void __user *ubuf = (void __user *)arg;
5766
5767
trace_binder_ioctl(cmd, arg);
5768
5769
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5770
if (ret)
5771
goto err_unlocked;
5772
5773
thread = binder_get_thread(proc);
5774
if (thread == NULL) {
5775
ret = -ENOMEM;
5776
goto err;
5777
}
5778
5779
switch (cmd) {
5780
case BINDER_WRITE_READ:
5781
ret = binder_ioctl_write_read(filp, arg, thread);
5782
if (ret)
5783
goto err;
5784
break;
5785
case BINDER_SET_MAX_THREADS: {
5786
u32 max_threads;
5787
5788
if (copy_from_user(&max_threads, ubuf,
5789
sizeof(max_threads))) {
5790
ret = -EINVAL;
5791
goto err;
5792
}
5793
binder_inner_proc_lock(proc);
5794
proc->max_threads = max_threads;
5795
binder_inner_proc_unlock(proc);
5796
break;
5797
}
5798
case BINDER_SET_CONTEXT_MGR_EXT: {
5799
struct flat_binder_object fbo;
5800
5801
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5802
ret = -EINVAL;
5803
goto err;
5804
}
5805
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5806
if (ret)
5807
goto err;
5808
break;
5809
}
5810
case BINDER_SET_CONTEXT_MGR:
5811
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5812
if (ret)
5813
goto err;
5814
break;
5815
case BINDER_THREAD_EXIT:
5816
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5817
proc->pid, thread->pid);
5818
binder_thread_release(proc, thread);
5819
thread = NULL;
5820
break;
5821
case BINDER_VERSION: {
5822
struct binder_version __user *ver = ubuf;
5823
5824
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5825
&ver->protocol_version)) {
5826
ret = -EINVAL;
5827
goto err;
5828
}
5829
break;
5830
}
5831
case BINDER_GET_NODE_INFO_FOR_REF: {
5832
struct binder_node_info_for_ref info;
5833
5834
if (copy_from_user(&info, ubuf, sizeof(info))) {
5835
ret = -EFAULT;
5836
goto err;
5837
}
5838
5839
ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5840
if (ret < 0)
5841
goto err;
5842
5843
if (copy_to_user(ubuf, &info, sizeof(info))) {
5844
ret = -EFAULT;
5845
goto err;
5846
}
5847
5848
break;
5849
}
5850
case BINDER_GET_NODE_DEBUG_INFO: {
5851
struct binder_node_debug_info info;
5852
5853
if (copy_from_user(&info, ubuf, sizeof(info))) {
5854
ret = -EFAULT;
5855
goto err;
5856
}
5857
5858
ret = binder_ioctl_get_node_debug_info(proc, &info);
5859
if (ret < 0)
5860
goto err;
5861
5862
if (copy_to_user(ubuf, &info, sizeof(info))) {
5863
ret = -EFAULT;
5864
goto err;
5865
}
5866
break;
5867
}
5868
case BINDER_FREEZE: {
5869
struct binder_freeze_info info;
5870
struct binder_proc **target_procs = NULL, *target_proc;
5871
int target_procs_count = 0, i = 0;
5872
5873
ret = 0;
5874
5875
if (copy_from_user(&info, ubuf, sizeof(info))) {
5876
ret = -EFAULT;
5877
goto err;
5878
}
5879
5880
mutex_lock(&binder_procs_lock);
5881
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5882
if (target_proc->pid == info.pid)
5883
target_procs_count++;
5884
}
5885
5886
if (target_procs_count == 0) {
5887
mutex_unlock(&binder_procs_lock);
5888
ret = -EINVAL;
5889
goto err;
5890
}
5891
5892
target_procs = kcalloc(target_procs_count,
5893
sizeof(struct binder_proc *),
5894
GFP_KERNEL);
5895
5896
if (!target_procs) {
5897
mutex_unlock(&binder_procs_lock);
5898
ret = -ENOMEM;
5899
goto err;
5900
}
5901
5902
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5903
if (target_proc->pid != info.pid)
5904
continue;
5905
5906
binder_inner_proc_lock(target_proc);
5907
target_proc->tmp_ref++;
5908
binder_inner_proc_unlock(target_proc);
5909
5910
target_procs[i++] = target_proc;
5911
}
5912
mutex_unlock(&binder_procs_lock);
5913
5914
for (i = 0; i < target_procs_count; i++) {
5915
if (ret >= 0)
5916
ret = binder_ioctl_freeze(&info,
5917
target_procs[i]);
5918
5919
binder_proc_dec_tmpref(target_procs[i]);
5920
}
5921
5922
kfree(target_procs);
5923
5924
if (ret < 0)
5925
goto err;
5926
break;
5927
}
5928
case BINDER_GET_FROZEN_INFO: {
5929
struct binder_frozen_status_info info;
5930
5931
if (copy_from_user(&info, ubuf, sizeof(info))) {
5932
ret = -EFAULT;
5933
goto err;
5934
}
5935
5936
ret = binder_ioctl_get_freezer_info(&info);
5937
if (ret < 0)
5938
goto err;
5939
5940
if (copy_to_user(ubuf, &info, sizeof(info))) {
5941
ret = -EFAULT;
5942
goto err;
5943
}
5944
break;
5945
}
5946
case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5947
uint32_t enable;
5948
5949
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5950
ret = -EFAULT;
5951
goto err;
5952
}
5953
binder_inner_proc_lock(proc);
5954
proc->oneway_spam_detection_enabled = (bool)enable;
5955
binder_inner_proc_unlock(proc);
5956
break;
5957
}
5958
case BINDER_GET_EXTENDED_ERROR:
5959
ret = binder_ioctl_get_extended_error(thread, ubuf);
5960
if (ret < 0)
5961
goto err;
5962
break;
5963
default:
5964
ret = -EINVAL;
5965
goto err;
5966
}
5967
ret = 0;
5968
err:
5969
if (thread)
5970
thread->looper_need_return = false;
5971
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5972
if (ret && ret != -EINTR)
5973
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5974
err_unlocked:
5975
trace_binder_ioctl_done(ret);
5976
return ret;
5977
}
5978
5979
static void binder_vma_open(struct vm_area_struct *vma)
5980
{
5981
struct binder_proc *proc = vma->vm_private_data;
5982
5983
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5984
"%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5985
proc->pid, vma->vm_start, vma->vm_end,
5986
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5987
(unsigned long)pgprot_val(vma->vm_page_prot));
5988
}
5989
5990
static void binder_vma_close(struct vm_area_struct *vma)
5991
{
5992
struct binder_proc *proc = vma->vm_private_data;
5993
5994
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5995
"%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5996
proc->pid, vma->vm_start, vma->vm_end,
5997
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5998
(unsigned long)pgprot_val(vma->vm_page_prot));
5999
binder_alloc_vma_close(&proc->alloc);
6000
}
6001
6002
VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
6003
{
6004
return VM_FAULT_SIGBUS;
6005
}
6006
EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
6007
6008
static const struct vm_operations_struct binder_vm_ops = {
6009
.open = binder_vma_open,
6010
.close = binder_vma_close,
6011
.fault = binder_vm_fault,
6012
};
6013
6014
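/*
 * mmap() of the binder fd sets up the buffer area that transaction data
 * is copied into. The mapping may only be created by the process that
 * opened the device, must never be writable from userspace
 * (FORBIDDEN_MMAP_FLAGS is VM_WRITE, and VM_MAYWRITE is cleared), is not
 * copied across fork() (VM_DONTCOPY), and is handed to the binder_alloc
 * layer for the actual page management.
 */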
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

static int binder_open(struct inode *nodp, struct file *filp)
6041
{
6042
struct binder_proc *proc, *itr;
6043
struct binder_device *binder_dev;
6044
struct binderfs_info *info;
6045
struct dentry *binder_binderfs_dir_entry_proc = NULL;
6046
bool existing_pid = false;
6047
6048
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6049
current->group_leader->pid, current->pid);
6050
6051
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6052
if (proc == NULL)
6053
return -ENOMEM;
6054
6055
dbitmap_init(&proc->dmap);
6056
spin_lock_init(&proc->inner_lock);
6057
spin_lock_init(&proc->outer_lock);
6058
get_task_struct(current->group_leader);
6059
proc->tsk = current->group_leader;
6060
proc->cred = get_cred(filp->f_cred);
6061
INIT_LIST_HEAD(&proc->todo);
6062
init_waitqueue_head(&proc->freeze_wait);
6063
proc->default_priority = task_nice(current);
6064
/* binderfs stashes devices in i_private */
6065
if (is_binderfs_device(nodp)) {
6066
binder_dev = nodp->i_private;
6067
info = nodp->i_sb->s_fs_info;
6068
binder_binderfs_dir_entry_proc = info->proc_log_dir;
6069
} else {
6070
binder_dev = container_of(filp->private_data,
6071
struct binder_device, miscdev);
6072
}
6073
refcount_inc(&binder_dev->ref);
6074
proc->context = &binder_dev->context;
6075
binder_alloc_init(&proc->alloc);
6076
6077
binder_stats_created(BINDER_STAT_PROC);
6078
proc->pid = current->group_leader->pid;
6079
INIT_LIST_HEAD(&proc->delivered_death);
6080
INIT_LIST_HEAD(&proc->delivered_freeze);
6081
INIT_LIST_HEAD(&proc->waiting_threads);
6082
filp->private_data = proc;
6083
6084
mutex_lock(&binder_procs_lock);
6085
hlist_for_each_entry(itr, &binder_procs, proc_node) {
6086
if (itr->pid == proc->pid) {
6087
existing_pid = true;
6088
break;
6089
}
6090
}
6091
hlist_add_head(&proc->proc_node, &binder_procs);
6092
mutex_unlock(&binder_procs_lock);
6093
6094
if (binder_debugfs_dir_entry_proc && !existing_pid) {
6095
char strbuf[11];
6096
6097
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6098
/*
6099
* proc debug entries are shared between contexts.
6100
* Only create for the first PID to avoid debugfs log spamming
6101
* The printing code will anyway print all contexts for a given
6102
* PID so this is not a problem.
6103
*/
6104
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6105
binder_debugfs_dir_entry_proc,
6106
(void *)(unsigned long)proc->pid,
6107
&proc_fops);
6108
}
6109
6110
if (binder_binderfs_dir_entry_proc && !existing_pid) {
6111
char strbuf[11];
6112
struct dentry *binderfs_entry;
6113
6114
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6115
/*
6116
* Similar to debugfs, the process specific log file is shared
6117
* between contexts. Only create for the first PID.
6118
* This is ok since same as debugfs, the log file will contain
6119
* information on all contexts of a given PID.
6120
*/
6121
binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6122
strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6123
if (!IS_ERR(binderfs_entry)) {
6124
proc->binderfs_entry = binderfs_entry;
6125
} else {
6126
int error;
6127
6128
error = PTR_ERR(binderfs_entry);
6129
pr_warn("Unable to create file %s in binderfs (error %d)\n",
6130
strbuf, error);
6131
}
6132
}
6133
6134
return 0;
6135
}
6136
6137
static int binder_flush(struct file *filp, fl_owner_t id)
6138
{
6139
struct binder_proc *proc = filp->private_data;
6140
6141
binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6142
6143
return 0;
6144
}
6145
6146
static void binder_deferred_flush(struct binder_proc *proc)
6147
{
6148
struct rb_node *n;
6149
int wake_count = 0;
6150
6151
binder_inner_proc_lock(proc);
6152
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6153
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6154
6155
thread->looper_need_return = true;
6156
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6157
wake_up_interruptible(&thread->wait);
6158
wake_count++;
6159
}
6160
}
6161
binder_inner_proc_unlock(proc);
6162
6163
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6164
"binder_flush: %d woke %d threads\n", proc->pid,
6165
wake_count);
6166
}
6167
6168
static int binder_release(struct inode *nodp, struct file *filp)
6169
{
6170
struct binder_proc *proc = filp->private_data;
6171
6172
debugfs_remove(proc->debugfs_entry);
6173
6174
if (proc->binderfs_entry) {
6175
simple_recursive_removal(proc->binderfs_entry, NULL);
6176
proc->binderfs_entry = NULL;
6177
}
6178
6179
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6180
6181
return 0;
6182
}
6183
6184
static int binder_node_release(struct binder_node *node, int refs)
6185
{
6186
struct binder_ref *ref;
6187
int death = 0;
6188
struct binder_proc *proc = node->proc;
6189
6190
binder_release_work(proc, &node->async_todo);
6191
6192
binder_node_lock(node);
6193
binder_inner_proc_lock(proc);
6194
binder_dequeue_work_ilocked(&node->work);
6195
/*
6196
* The caller must have taken a temporary ref on the node,
6197
*/
6198
BUG_ON(!node->tmp_refs);
6199
if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6200
binder_inner_proc_unlock(proc);
6201
binder_node_unlock(node);
6202
binder_free_node(node);
6203
6204
return refs;
6205
}
6206
6207
node->proc = NULL;
6208
node->local_strong_refs = 0;
6209
node->local_weak_refs = 0;
6210
binder_inner_proc_unlock(proc);
6211
6212
spin_lock(&binder_dead_nodes_lock);
6213
hlist_add_head(&node->dead_node, &binder_dead_nodes);
6214
spin_unlock(&binder_dead_nodes_lock);
6215
6216
hlist_for_each_entry(ref, &node->refs, node_entry) {
6217
refs++;
6218
/*
6219
* Need the node lock to synchronize
6220
* with new notification requests and the
6221
* inner lock to synchronize with queued
6222
* death notifications.
6223
*/
6224
binder_inner_proc_lock(ref->proc);
6225
if (!ref->death) {
6226
binder_inner_proc_unlock(ref->proc);
6227
continue;
6228
}
6229
6230
death++;
6231
6232
BUG_ON(!list_empty(&ref->death->work.entry));
6233
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6234
binder_enqueue_work_ilocked(&ref->death->work,
6235
&ref->proc->todo);
6236
binder_wakeup_proc_ilocked(ref->proc);
6237
binder_inner_proc_unlock(ref->proc);
6238
}
6239
6240
binder_debug(BINDER_DEBUG_DEAD_BINDER,
6241
"node %d now dead, refs %d, death %d\n",
6242
node->debug_id, refs, death);
6243
binder_node_unlock(node);
6244
binder_put_node(node);
6245
6246
return refs;
6247
}
6248
6249
static void binder_deferred_release(struct binder_proc *proc)
6250
{
6251
struct binder_context *context = proc->context;
6252
struct rb_node *n;
6253
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6254
6255
mutex_lock(&binder_procs_lock);
6256
hlist_del(&proc->proc_node);
6257
mutex_unlock(&binder_procs_lock);
6258
6259
mutex_lock(&context->context_mgr_node_lock);
6260
if (context->binder_context_mgr_node &&
6261
context->binder_context_mgr_node->proc == proc) {
6262
binder_debug(BINDER_DEBUG_DEAD_BINDER,
6263
"%s: %d context_mgr_node gone\n",
6264
__func__, proc->pid);
6265
context->binder_context_mgr_node = NULL;
6266
}
6267
mutex_unlock(&context->context_mgr_node_lock);
6268
binder_inner_proc_lock(proc);
6269
/*
6270
* Make sure proc stays alive after we
6271
* remove all the threads
6272
*/
6273
proc->tmp_ref++;
6274
6275
proc->is_dead = true;
6276
proc->is_frozen = false;
6277
proc->sync_recv = false;
6278
proc->async_recv = false;
6279
threads = 0;
6280
active_transactions = 0;
6281
while ((n = rb_first(&proc->threads))) {
6282
struct binder_thread *thread;
6283
6284
thread = rb_entry(n, struct binder_thread, rb_node);
6285
binder_inner_proc_unlock(proc);
6286
threads++;
6287
active_transactions += binder_thread_release(proc, thread);
6288
binder_inner_proc_lock(proc);
6289
}
6290
6291
nodes = 0;
6292
incoming_refs = 0;
6293
while ((n = rb_first(&proc->nodes))) {
6294
struct binder_node *node;
6295
6296
node = rb_entry(n, struct binder_node, rb_node);
6297
nodes++;
6298
/*
6299
* take a temporary ref on the node before
6300
* calling binder_node_release() which will either
6301
* kfree() the node or call binder_put_node()
6302
*/
6303
binder_inc_node_tmpref_ilocked(node);
6304
rb_erase(&node->rb_node, &proc->nodes);
6305
binder_inner_proc_unlock(proc);
6306
incoming_refs = binder_node_release(node, incoming_refs);
6307
binder_inner_proc_lock(proc);
6308
}
6309
binder_inner_proc_unlock(proc);
6310
6311
outgoing_refs = 0;
6312
binder_proc_lock(proc);
6313
while ((n = rb_first(&proc->refs_by_desc))) {
6314
struct binder_ref *ref;
6315
6316
ref = rb_entry(n, struct binder_ref, rb_node_desc);
6317
outgoing_refs++;
6318
binder_cleanup_ref_olocked(ref);
6319
binder_proc_unlock(proc);
6320
binder_free_ref(ref);
6321
binder_proc_lock(proc);
6322
}
6323
binder_proc_unlock(proc);
6324
6325
binder_release_work(proc, &proc->todo);
6326
binder_release_work(proc, &proc->delivered_death);
6327
binder_release_work(proc, &proc->delivered_freeze);
6328
6329
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6330
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6331
__func__, proc->pid, threads, nodes, incoming_refs,
6332
outgoing_refs, active_transactions);
6333
6334
binder_proc_dec_tmpref(proc);
6335
}
6336
6337
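/*
 * Deferred work machinery: flush and release are pushed to a workqueue
 * rather than run directly in the file-ops path. binder_defer_work()
 * records the pending work bits on the proc, chains it on
 * binder_deferred_list and schedules the work item; binder_deferred_func()
 * then drains that list one proc at a time, running the flush and/or
 * release steps recorded for it.
 */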
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	guard(mutex)(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
}

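/*
 * The remaining helpers render the debugfs/binderfs output. Most take a
 * hash_ptrs flag: the plain "state"/"transactions" files print node user
 * pointers and cookies as raw %016llx values, while the *_hashed variants
 * print them through %p so they get the kernel's usual pointer hashing.
 */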
static void print_binder_transaction_ilocked(struct seq_file *m,
6379
struct binder_proc *proc,
6380
const char *prefix,
6381
struct binder_transaction *t)
6382
{
6383
struct binder_proc *to_proc;
6384
struct binder_buffer *buffer = t->buffer;
6385
ktime_t current_time = ktime_get();
6386
6387
spin_lock(&t->lock);
6388
to_proc = t->to_proc;
6389
seq_printf(m,
6390
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
6391
prefix, t->debug_id, t,
6392
t->from_pid,
6393
t->from_tid,
6394
to_proc ? to_proc->pid : 0,
6395
t->to_thread ? t->to_thread->pid : 0,
6396
t->code, t->flags, t->priority, t->is_async, t->is_reply,
6397
ktime_ms_delta(current_time, t->start_time));
6398
spin_unlock(&t->lock);
6399
6400
if (proc != to_proc) {
6401
/*
6402
* Can only safely deref buffer if we are holding the
6403
* correct proc inner lock for this node
6404
*/
6405
seq_puts(m, "\n");
6406
return;
6407
}
6408
6409
if (buffer == NULL) {
6410
seq_puts(m, " buffer free\n");
6411
return;
6412
}
6413
if (buffer->target_node)
6414
seq_printf(m, " node %d", buffer->target_node->debug_id);
6415
seq_printf(m, " size %zd:%zd offset %lx\n",
6416
buffer->data_size, buffer->offsets_size,
6417
buffer->user_data - proc->alloc.vm_start);
6418
}
6419
6420
static void print_binder_work_ilocked(struct seq_file *m,
6421
struct binder_proc *proc,
6422
const char *prefix,
6423
const char *transaction_prefix,
6424
struct binder_work *w, bool hash_ptrs)
6425
{
6426
struct binder_node *node;
6427
struct binder_transaction *t;
6428
6429
switch (w->type) {
6430
case BINDER_WORK_TRANSACTION:
6431
t = container_of(w, struct binder_transaction, work);
6432
print_binder_transaction_ilocked(
6433
m, proc, transaction_prefix, t);
6434
break;
6435
case BINDER_WORK_RETURN_ERROR: {
6436
struct binder_error *e = container_of(
6437
w, struct binder_error, work);
6438
6439
seq_printf(m, "%stransaction error: %u\n",
6440
prefix, e->cmd);
6441
} break;
6442
case BINDER_WORK_TRANSACTION_COMPLETE:
6443
seq_printf(m, "%stransaction complete\n", prefix);
6444
break;
6445
case BINDER_WORK_NODE:
6446
node = container_of(w, struct binder_node, work);
6447
if (hash_ptrs)
6448
seq_printf(m, "%snode work %d: u%p c%p\n",
6449
prefix, node->debug_id,
6450
(void *)(long)node->ptr,
6451
(void *)(long)node->cookie);
6452
else
6453
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6454
prefix, node->debug_id,
6455
(u64)node->ptr, (u64)node->cookie);
6456
break;
6457
case BINDER_WORK_DEAD_BINDER:
6458
seq_printf(m, "%shas dead binder\n", prefix);
6459
break;
6460
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6461
seq_printf(m, "%shas cleared dead binder\n", prefix);
6462
break;
6463
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6464
seq_printf(m, "%shas cleared death notification\n", prefix);
6465
break;
6466
case BINDER_WORK_FROZEN_BINDER:
6467
seq_printf(m, "%shas frozen binder\n", prefix);
6468
break;
6469
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6470
seq_printf(m, "%shas cleared freeze notification\n", prefix);
6471
break;
6472
default:
6473
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6474
break;
6475
}
6476
}
6477
6478
static void print_binder_thread_ilocked(struct seq_file *m,
6479
struct binder_thread *thread,
6480
bool print_always, bool hash_ptrs)
6481
{
6482
struct binder_transaction *t;
6483
struct binder_work *w;
6484
size_t start_pos = m->count;
6485
size_t header_pos;
6486
6487
seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6488
thread->pid, thread->looper,
6489
thread->looper_need_return,
6490
atomic_read(&thread->tmp_ref));
6491
header_pos = m->count;
6492
t = thread->transaction_stack;
6493
while (t) {
6494
if (t->from == thread) {
6495
print_binder_transaction_ilocked(m, thread->proc,
6496
" outgoing transaction", t);
6497
t = t->from_parent;
6498
} else if (t->to_thread == thread) {
6499
print_binder_transaction_ilocked(m, thread->proc,
6500
" incoming transaction", t);
6501
t = t->to_parent;
6502
} else {
6503
print_binder_transaction_ilocked(m, thread->proc,
6504
" bad transaction", t);
6505
t = NULL;
6506
}
6507
}
6508
list_for_each_entry(w, &thread->todo, entry) {
6509
print_binder_work_ilocked(m, thread->proc, " ",
6510
" pending transaction",
6511
w, hash_ptrs);
6512
}
6513
if (!print_always && m->count == header_pos)
6514
m->count = start_pos;
6515
}
6516
6517
static void print_binder_node_nilocked(struct seq_file *m,
6518
struct binder_node *node,
6519
bool hash_ptrs)
6520
{
6521
struct binder_ref *ref;
6522
struct binder_work *w;
6523
int count;
6524
6525
count = hlist_count_nodes(&node->refs);
6526
6527
if (hash_ptrs)
6528
seq_printf(m, " node %d: u%p c%p", node->debug_id,
6529
(void *)(long)node->ptr, (void *)(long)node->cookie);
6530
else
6531
seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
6532
(u64)node->ptr, (u64)node->cookie);
6533
seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6534
node->has_strong_ref, node->has_weak_ref,
6535
node->local_strong_refs, node->local_weak_refs,
6536
node->internal_strong_refs, count, node->tmp_refs);
6537
if (count) {
6538
seq_puts(m, " proc");
6539
hlist_for_each_entry(ref, &node->refs, node_entry)
6540
seq_printf(m, " %d", ref->proc->pid);
6541
}
6542
seq_puts(m, "\n");
6543
if (node->proc) {
6544
list_for_each_entry(w, &node->async_todo, entry)
6545
print_binder_work_ilocked(m, node->proc, " ",
6546
" pending async transaction",
6547
w, hash_ptrs);
6548
}
6549
}
6550
6551
static void print_binder_ref_olocked(struct seq_file *m,
6552
struct binder_ref *ref)
6553
{
6554
binder_node_lock(ref->node);
6555
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6556
ref->data.debug_id, ref->data.desc,
6557
ref->node->proc ? "" : "dead ",
6558
ref->node->debug_id, ref->data.strong,
6559
ref->data.weak, ref->death);
6560
binder_node_unlock(ref->node);
6561
}
6562
6563
/**
6564
* print_next_binder_node_ilocked() - Print binder_node from a locked list
6565
* @m: struct seq_file for output via seq_printf()
6566
* @proc: struct binder_proc we hold the inner_proc_lock to (if any)
6567
* @node: struct binder_node to print fields of
6568
* @prev_node: struct binder_node we hold a temporary reference to (if any)
6569
* @hash_ptrs: whether to hash @node's binder_uintptr_t fields
6570
*
6571
* Helper function to handle synchronization around printing a struct
6572
* binder_node while iterating through @proc->nodes or the dead nodes list.
6573
* Caller must hold either @proc->inner_lock (for live nodes) or
6574
* binder_dead_nodes_lock. This lock will be released during the body of this
6575
* function, but it will be reacquired before returning to the caller.
6576
*
6577
* Return: pointer to the struct binder_node we hold a tmpref on
6578
*/
6579
static struct binder_node *
6580
print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6581
struct binder_node *node,
6582
struct binder_node *prev_node, bool hash_ptrs)
6583
{
6584
/*
6585
* Take a temporary reference on the node so that isn't freed while
6586
* we print it.
6587
*/
6588
binder_inc_node_tmpref_ilocked(node);
6589
/*
6590
* Live nodes need to drop the inner proc lock and dead nodes need to
6591
* drop the binder_dead_nodes_lock before trying to take the node lock.
6592
*/
6593
if (proc)
6594
binder_inner_proc_unlock(proc);
6595
else
6596
spin_unlock(&binder_dead_nodes_lock);
6597
if (prev_node)
6598
binder_put_node(prev_node);
6599
binder_node_inner_lock(node);
6600
print_binder_node_nilocked(m, node, hash_ptrs);
6601
binder_node_inner_unlock(node);
6602
if (proc)
6603
binder_inner_proc_lock(proc);
6604
else
6605
spin_lock(&binder_dead_nodes_lock);
6606
return node;
6607
}
6608
6609
static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6610
bool print_all, bool hash_ptrs)
6611
{
6612
struct binder_work *w;
6613
struct rb_node *n;
6614
size_t start_pos = m->count;
6615
size_t header_pos;
6616
struct binder_node *last_node = NULL;
6617
6618
seq_printf(m, "proc %d\n", proc->pid);
6619
seq_printf(m, "context %s\n", proc->context->name);
6620
header_pos = m->count;
6621
6622
binder_inner_proc_lock(proc);
6623
for (n = rb_first(&proc->threads); n; n = rb_next(n))
6624
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6625
rb_node), print_all, hash_ptrs);
6626
6627
for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6628
struct binder_node *node = rb_entry(n, struct binder_node,
6629
rb_node);
6630
if (!print_all && !node->has_async_transaction)
6631
continue;
6632
6633
last_node = print_next_binder_node_ilocked(m, proc, node,
6634
last_node,
6635
hash_ptrs);
6636
}
6637
binder_inner_proc_unlock(proc);
6638
if (last_node)
6639
binder_put_node(last_node);
6640
6641
if (print_all) {
6642
binder_proc_lock(proc);
6643
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6644
print_binder_ref_olocked(m, rb_entry(n,
6645
struct binder_ref,
6646
rb_node_desc));
6647
binder_proc_unlock(proc);
6648
}
6649
binder_alloc_print_allocated(m, &proc->alloc);
6650
binder_inner_proc_lock(proc);
6651
list_for_each_entry(w, &proc->todo, entry)
6652
print_binder_work_ilocked(m, proc, " ",
6653
" pending transaction", w,
6654
hash_ptrs);
6655
list_for_each_entry(w, &proc->delivered_death, entry) {
6656
seq_puts(m, " has delivered dead binder\n");
6657
break;
6658
}
6659
list_for_each_entry(w, &proc->delivered_freeze, entry) {
6660
seq_puts(m, " has delivered freeze binder\n");
6661
break;
6662
}
6663
binder_inner_proc_unlock(proc);
6664
if (!print_all && m->count == header_pos)
6665
m->count = start_pos;
6666
}
6667
6668
static const char * const binder_return_strings[] = {
6669
"BR_ERROR",
6670
"BR_OK",
6671
"BR_TRANSACTION",
6672
"BR_REPLY",
6673
"BR_ACQUIRE_RESULT",
6674
"BR_DEAD_REPLY",
6675
"BR_TRANSACTION_COMPLETE",
6676
"BR_INCREFS",
6677
"BR_ACQUIRE",
6678
"BR_RELEASE",
6679
"BR_DECREFS",
6680
"BR_ATTEMPT_ACQUIRE",
6681
"BR_NOOP",
6682
"BR_SPAWN_LOOPER",
6683
"BR_FINISHED",
6684
"BR_DEAD_BINDER",
6685
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6686
"BR_FAILED_REPLY",
6687
"BR_FROZEN_REPLY",
6688
"BR_ONEWAY_SPAM_SUSPECT",
6689
"BR_TRANSACTION_PENDING_FROZEN",
6690
"BR_FROZEN_BINDER",
6691
"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6692
};
6693
6694
static const char * const binder_command_strings[] = {
6695
"BC_TRANSACTION",
6696
"BC_REPLY",
6697
"BC_ACQUIRE_RESULT",
6698
"BC_FREE_BUFFER",
6699
"BC_INCREFS",
6700
"BC_ACQUIRE",
6701
"BC_RELEASE",
6702
"BC_DECREFS",
6703
"BC_INCREFS_DONE",
6704
"BC_ACQUIRE_DONE",
6705
"BC_ATTEMPT_ACQUIRE",
6706
"BC_REGISTER_LOOPER",
6707
"BC_ENTER_LOOPER",
6708
"BC_EXIT_LOOPER",
6709
"BC_REQUEST_DEATH_NOTIFICATION",
6710
"BC_CLEAR_DEATH_NOTIFICATION",
6711
"BC_DEAD_BINDER_DONE",
6712
"BC_TRANSACTION_SG",
6713
"BC_REPLY_SG",
6714
"BC_REQUEST_FREEZE_NOTIFICATION",
6715
"BC_CLEAR_FREEZE_NOTIFICATION",
6716
"BC_FREEZE_NOTIFICATION_DONE",
6717
};
6718
6719
static const char * const binder_objstat_strings[] = {
6720
"proc",
6721
"thread",
6722
"node",
6723
"ref",
6724
"death",
6725
"transaction",
6726
"transaction_complete",
6727
"freeze",
6728
};
6729
6730
static void print_binder_stats(struct seq_file *m, const char *prefix,
6731
struct binder_stats *stats)
6732
{
6733
int i;
6734
6735
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6736
ARRAY_SIZE(binder_command_strings));
6737
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6738
int temp = atomic_read(&stats->bc[i]);
6739
6740
if (temp)
6741
seq_printf(m, "%s%s: %d\n", prefix,
6742
binder_command_strings[i], temp);
6743
}
6744
6745
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6746
ARRAY_SIZE(binder_return_strings));
6747
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6748
int temp = atomic_read(&stats->br[i]);
6749
6750
if (temp)
6751
seq_printf(m, "%s%s: %d\n", prefix,
6752
binder_return_strings[i], temp);
6753
}
6754
6755
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6756
ARRAY_SIZE(binder_objstat_strings));
6757
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6758
ARRAY_SIZE(stats->obj_deleted));
6759
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6760
int created = atomic_read(&stats->obj_created[i]);
6761
int deleted = atomic_read(&stats->obj_deleted[i]);
6762
6763
if (created || deleted)
6764
seq_printf(m, "%s%s: active %d total %d\n",
6765
prefix,
6766
binder_objstat_strings[i],
6767
created - deleted,
6768
created);
6769
}
6770
}
6771
6772
static void print_binder_proc_stats(struct seq_file *m,
6773
struct binder_proc *proc)
6774
{
6775
struct binder_work *w;
6776
struct binder_thread *thread;
6777
struct rb_node *n;
6778
int count, strong, weak, ready_threads;
6779
size_t free_async_space =
6780
binder_alloc_get_free_async_space(&proc->alloc);
6781
6782
seq_printf(m, "proc %d\n", proc->pid);
6783
seq_printf(m, "context %s\n", proc->context->name);
6784
count = 0;
6785
ready_threads = 0;
6786
binder_inner_proc_lock(proc);
6787
for (n = rb_first(&proc->threads); n; n = rb_next(n))
6788
count++;
6789
6790
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6791
ready_threads++;
6792
6793
seq_printf(m, " threads: %d\n", count);
6794
seq_printf(m, " requested threads: %d+%d/%d\n"
6795
" ready threads %d\n"
6796
" free async space %zd\n", proc->requested_threads,
6797
proc->requested_threads_started, proc->max_threads,
6798
ready_threads,
6799
free_async_space);
6800
count = 0;
6801
for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6802
count++;
6803
binder_inner_proc_unlock(proc);
6804
seq_printf(m, " nodes: %d\n", count);
6805
count = 0;
6806
strong = 0;
6807
weak = 0;
6808
binder_proc_lock(proc);
6809
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6810
struct binder_ref *ref = rb_entry(n, struct binder_ref,
6811
rb_node_desc);
6812
count++;
6813
strong += ref->data.strong;
6814
weak += ref->data.weak;
6815
}
6816
binder_proc_unlock(proc);
6817
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6818
6819
count = binder_alloc_get_allocated_count(&proc->alloc);
6820
seq_printf(m, " buffers: %d\n", count);
6821
6822
binder_alloc_print_pages(m, &proc->alloc);
6823
6824
count = 0;
6825
binder_inner_proc_lock(proc);
6826
list_for_each_entry(w, &proc->todo, entry) {
6827
if (w->type == BINDER_WORK_TRANSACTION)
6828
count++;
6829
}
6830
binder_inner_proc_unlock(proc);
6831
seq_printf(m, " pending transactions: %d\n", count);
6832
6833
print_binder_stats(m, " ", &proc->stats);
6834
}
6835
6836
static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6837
{
6838
struct binder_proc *proc;
6839
struct binder_node *node;
6840
struct binder_node *last_node = NULL;
6841
6842
seq_puts(m, "binder state:\n");
6843
6844
spin_lock(&binder_dead_nodes_lock);
6845
if (!hlist_empty(&binder_dead_nodes))
6846
seq_puts(m, "dead nodes:\n");
6847
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6848
last_node = print_next_binder_node_ilocked(m, NULL, node,
6849
last_node,
6850
hash_ptrs);
6851
spin_unlock(&binder_dead_nodes_lock);
6852
if (last_node)
6853
binder_put_node(last_node);
6854
6855
mutex_lock(&binder_procs_lock);
6856
hlist_for_each_entry(proc, &binder_procs, proc_node)
6857
print_binder_proc(m, proc, true, hash_ptrs);
6858
mutex_unlock(&binder_procs_lock);
6859
}
6860
6861
static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6862
{
6863
struct binder_proc *proc;
6864
6865
seq_puts(m, "binder transactions:\n");
6866
mutex_lock(&binder_procs_lock);
6867
hlist_for_each_entry(proc, &binder_procs, proc_node)
6868
print_binder_proc(m, proc, false, hash_ptrs);
6869
mutex_unlock(&binder_procs_lock);
6870
}
6871
6872
static int state_show(struct seq_file *m, void *unused)
6873
{
6874
print_binder_state(m, false);
6875
return 0;
6876
}
6877
6878
static int state_hashed_show(struct seq_file *m, void *unused)
6879
{
6880
print_binder_state(m, true);
6881
return 0;
6882
}
6883
6884
static int stats_show(struct seq_file *m, void *unused)
6885
{
6886
struct binder_proc *proc;
6887
6888
seq_puts(m, "binder stats:\n");
6889
6890
print_binder_stats(m, "", &binder_stats);
6891
6892
mutex_lock(&binder_procs_lock);
6893
hlist_for_each_entry(proc, &binder_procs, proc_node)
6894
print_binder_proc_stats(m, proc);
6895
mutex_unlock(&binder_procs_lock);
6896
6897
return 0;
6898
}
6899
6900
static int transactions_show(struct seq_file *m, void *unused)
6901
{
6902
print_binder_transactions(m, false);
6903
return 0;
6904
}
6905
6906
static int transactions_hashed_show(struct seq_file *m, void *unused)
6907
{
6908
print_binder_transactions(m, true);
6909
return 0;
6910
}
6911
6912
static int proc_show(struct seq_file *m, void *unused)
6913
{
6914
struct binder_proc *itr;
6915
int pid = (unsigned long)m->private;
6916
6917
guard(mutex)(&binder_procs_lock);
6918
hlist_for_each_entry(itr, &binder_procs, proc_node) {
6919
if (itr->pid == pid) {
6920
seq_puts(m, "binder proc state:\n");
6921
print_binder_proc(m, itr, true, false);
6922
}
6923
}
6924
6925
return 0;
6926
}
6927
6928
static void print_binder_transaction_log_entry(struct seq_file *m,
6929
struct binder_transaction_log_entry *e)
6930
{
6931
int debug_id = READ_ONCE(e->debug_id_done);
6932
/*
6933
* read barrier to guarantee debug_id_done read before
6934
* we print the log values
6935
*/
6936
smp_rmb();
6937
seq_printf(m,
6938
"%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6939
e->debug_id, (e->call_type == 2) ? "reply" :
6940
((e->call_type == 1) ? "async" : "call "), e->from_proc,
6941
e->from_thread, e->to_proc, e->to_thread, e->context_name,
6942
e->to_node, e->target_handle, e->data_size, e->offsets_size,
6943
e->return_error, e->return_error_param,
6944
e->return_error_line);
6945
/*
6946
* read-barrier to guarantee read of debug_id_done after
6947
* done printing the fields of the entry
6948
*/
6949
smp_rmb();
6950
seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6951
"\n" : " (incomplete)\n");
6952
}
6953
6954
static int transaction_log_show(struct seq_file *m, void *unused)
6955
{
6956
struct binder_transaction_log *log = m->private;
6957
unsigned int log_cur = atomic_read(&log->cur);
6958
unsigned int count;
6959
unsigned int cur;
6960
int i;
6961
6962
count = log_cur + 1;
6963
cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6964
0 : count % ARRAY_SIZE(log->entry);
6965
if (count > ARRAY_SIZE(log->entry) || log->full)
6966
count = ARRAY_SIZE(log->entry);
6967
for (i = 0; i < count; i++) {
6968
unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6969
6970
print_binder_transaction_log_entry(m, &log->entry[index]);
6971
}
6972
return 0;
6973
}
6974
6975
const struct file_operations binder_fops = {
6976
.owner = THIS_MODULE,
6977
.poll = binder_poll,
6978
.unlocked_ioctl = binder_ioctl,
6979
.compat_ioctl = compat_ptr_ioctl,
6980
.mmap = binder_mmap,
6981
.open = binder_open,
6982
.flush = binder_flush,
6983
.release = binder_release,
6984
};
6985
6986
DEFINE_SHOW_ATTRIBUTE(state);
6987
DEFINE_SHOW_ATTRIBUTE(state_hashed);
6988
DEFINE_SHOW_ATTRIBUTE(stats);
6989
DEFINE_SHOW_ATTRIBUTE(transactions);
6990
DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
6991
DEFINE_SHOW_ATTRIBUTE(transaction_log);
6992
6993
const struct binder_debugfs_entry binder_debugfs_entries[] = {
6994
{
6995
.name = "state",
6996
.mode = 0444,
6997
.fops = &state_fops,
6998
.data = NULL,
6999
},
7000
{
7001
.name = "state_hashed",
7002
.mode = 0444,
7003
.fops = &state_hashed_fops,
7004
.data = NULL,
7005
},
7006
{
7007
.name = "stats",
7008
.mode = 0444,
7009
.fops = &stats_fops,
7010
.data = NULL,
7011
},
7012
{
7013
.name = "transactions",
7014
.mode = 0444,
7015
.fops = &transactions_fops,
7016
.data = NULL,
7017
},
7018
{
7019
.name = "transactions_hashed",
7020
.mode = 0444,
7021
.fops = &transactions_hashed_fops,
7022
.data = NULL,
7023
},
7024
{
7025
.name = "transaction_log",
7026
.mode = 0444,
7027
.fops = &transaction_log_fops,
7028
.data = &binder_transaction_log,
7029
},
7030
{
7031
.name = "failed_transaction_log",
7032
.mode = 0444,
7033
.fops = &transaction_log_fops,
7034
.data = &binder_transaction_log_failed,
7035
},
7036
{} /* terminator */
7037
};
7038
7039
void binder_add_device(struct binder_device *device)
{
	guard(spinlock)(&binder_devices_lock);
	hlist_add_head(&device->hlist, &binder_devices);
}

void binder_remove_device(struct binder_device *device)
{
	guard(spinlock)(&binder_devices_lock);
	hlist_del_init(&device->hlist);
}

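/*
 * Registers one "binder" misc device per name listed in
 * binder_devices_param (only used when binderfs is not providing the
 * device nodes, see binder_init() below). Each device gets its own
 * binder_context with an unset context-manager uid and is added to the
 * global binder_devices list.
 */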
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	binder_add_device(binder_device);

	return ret;
}

static int __init binder_init(void)
7081
{
7082
int ret;
7083
char *device_name, *device_tmp;
7084
struct binder_device *device;
7085
struct hlist_node *tmp;
7086
char *device_names = NULL;
7087
const struct binder_debugfs_entry *db_entry;
7088
7089
ret = binder_alloc_shrinker_init();
7090
if (ret)
7091
return ret;
7092
7093
atomic_set(&binder_transaction_log.cur, ~0U);
7094
atomic_set(&binder_transaction_log_failed.cur, ~0U);
7095
7096
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7097
7098
binder_for_each_debugfs_entry(db_entry)
7099
debugfs_create_file(db_entry->name,
7100
db_entry->mode,
7101
binder_debugfs_dir_entry_root,
7102
db_entry->data,
7103
db_entry->fops);
7104
7105
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7106
binder_debugfs_dir_entry_root);
7107
7108
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7109
strcmp(binder_devices_param, "") != 0) {
7110
/*
7111
* Copy the module_parameter string, because we don't want to
7112
* tokenize it in-place.
7113
*/
7114
device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7115
if (!device_names) {
7116
ret = -ENOMEM;
7117
goto err_alloc_device_names_failed;
7118
}
7119
7120
device_tmp = device_names;
7121
while ((device_name = strsep(&device_tmp, ","))) {
7122
ret = init_binder_device(device_name);
7123
if (ret)
7124
goto err_init_binder_device_failed;
7125
}
7126
}
7127
7128
ret = genl_register_family(&binder_nl_family);
7129
if (ret)
7130
goto err_init_binder_device_failed;
7131
7132
ret = init_binderfs();
7133
if (ret)
7134
goto err_init_binderfs_failed;
7135
7136
return ret;
7137
7138
err_init_binderfs_failed:
7139
genl_unregister_family(&binder_nl_family);
7140
7141
err_init_binder_device_failed:
7142
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7143
misc_deregister(&device->miscdev);
7144
binder_remove_device(device);
7145
kfree(device);
7146
}
7147
7148
kfree(device_names);
7149
7150
err_alloc_device_names_failed:
7151
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7152
binder_alloc_shrinker_exit();
7153
7154
return ret;
7155
}
7156
7157
device_initcall(binder_init);
7158
7159
#define CREATE_TRACE_POINTS
7160
#include "binder_trace.h"
7161
7162