1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock held on entry indicate which lock
42 * should be held in the suffix of the function name:
43 *
44 * foo_olocked() : requires node->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
51
52#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54#include <asm/cacheflush.h>
55#include <linux/fdtable.h>
56#include <linux/file.h>
57#include <linux/freezer.h>
58#include <linux/fs.h>
59#include <linux/list.h>
60#include <linux/miscdevice.h>
61#include <linux/module.h>
62#include <linux/mutex.h>
63#include <linux/nsproxy.h>
64#include <linux/poll.h>
65#include <linux/debugfs.h>
66#include <linux/rbtree.h>
67#include <linux/sched/signal.h>
68#include <linux/sched/mm.h>
69#include <linux/seq_file.h>
70#include <linux/uaccess.h>
71#include <linux/pid_namespace.h>
72#include <linux/security.h>
73#include <linux/spinlock.h>
74
75#include <uapi/linux/android/binder.h>
76#include <uapi/linux/sched/types.h>
77#include "binder_alloc.h"
78#include "binder_trace.h"
79
80static HLIST_HEAD(binder_deferred_list);
81static DEFINE_MUTEX(binder_deferred_lock);
82
83static HLIST_HEAD(binder_devices);
84static HLIST_HEAD(binder_procs);
85static DEFINE_MUTEX(binder_procs_lock);
86
87static HLIST_HEAD(binder_dead_nodes);
88static DEFINE_SPINLOCK(binder_dead_nodes_lock);
89
90static struct dentry *binder_debugfs_dir_entry_root;
91static struct dentry *binder_debugfs_dir_entry_proc;
92static atomic_t binder_last_id;
93
94#define BINDER_DEBUG_ENTRY(name) \
95static int binder_##name##_open(struct inode *inode, struct file *file) \
96{ \
97 return single_open(file, binder_##name##_show, inode->i_private); \
98} \
99\
100static const struct file_operations binder_##name##_fops = { \
101 .owner = THIS_MODULE, \
102 .open = binder_##name##_open, \
103 .read = seq_read, \
104 .llseek = seq_lseek, \
105 .release = single_release, \
106}
107
108static int binder_proc_show(struct seq_file *m, void *unused);
109BINDER_DEBUG_ENTRY(proc);
110
111/* This is only defined in include/asm-arm/sizes.h */
112#ifndef SZ_1K
113#define SZ_1K 0x400
114#endif
115
116#ifndef SZ_4M
117#define SZ_4M 0x400000
118#endif
119
120#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
121
122enum {
123 BINDER_DEBUG_USER_ERROR = 1U << 0,
124 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
125 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
126 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
127 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
128 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
129 BINDER_DEBUG_READ_WRITE = 1U << 6,
130 BINDER_DEBUG_USER_REFS = 1U << 7,
131 BINDER_DEBUG_THREADS = 1U << 8,
132 BINDER_DEBUG_TRANSACTION = 1U << 9,
133 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
134 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
135 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
136 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
137 BINDER_DEBUG_SPINLOCKS = 1U << 14,
138};
139static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
140 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
141module_param_named(debug_mask, binder_debug_mask, uint, 0644);
142
143static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
144module_param_named(devices, binder_devices_param, charp, 0444);
145
146static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
147static int binder_stop_on_user_error;
148
149static int binder_set_stop_on_user_error(const char *val,
150 const struct kernel_param *kp)
151{
152 int ret;
153
154 ret = param_set_int(val, kp);
155 if (binder_stop_on_user_error < 2)
156 wake_up(&binder_user_error_wait);
157 return ret;
158}
159module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
160 param_get_int, &binder_stop_on_user_error, 0644);
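
/*
 * Usage note (illustration only, not driver code): because these are module
 * parameters with 0644 permissions, root can tune them at runtime through
 * sysfs when the driver is registered as "binder", e.g.:
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *	echo 1 > /sys/module/binder/parameters/stop_on_user_error
 *
 * Writing a value below 2 to stop_on_user_error also wakes any waiter on
 * binder_user_error_wait via binder_set_stop_on_user_error() above.
 */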
161
162#define binder_debug(mask, x...) \
163 do { \
164 if (binder_debug_mask & mask) \
165 pr_info(x); \
166 } while (0)
167
168#define binder_user_error(x...) \
169 do { \
170 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
171 pr_info(x); \
172 if (binder_stop_on_user_error) \
173 binder_stop_on_user_error = 2; \
174 } while (0)
175
176#define to_flat_binder_object(hdr) \
177 container_of(hdr, struct flat_binder_object, hdr)
178
179#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
180
181#define to_binder_buffer_object(hdr) \
182 container_of(hdr, struct binder_buffer_object, hdr)
183
184#define to_binder_fd_array_object(hdr) \
185 container_of(hdr, struct binder_fd_array_object, hdr)
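
/*
 * Illustrative sketch, not part of the driver: the to_*() wrappers above use
 * container_of() to recover the enclosing object from an embedded
 * struct binder_object_header, so type dispatch on a parsed object looks
 * like the hypothetical helper below (binder_object_cookie() is an assumed
 * name used only for illustration).
 */
static inline binder_uintptr_t binder_object_cookie(
	struct binder_object_header *hdr)
{
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
		return to_flat_binder_object(hdr)->cookie;
	case BINDER_TYPE_FD:
		return to_binder_fd_object(hdr)->cookie;
	default:
		return 0;
	}
}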
186
187enum binder_stat_types {
188 BINDER_STAT_PROC,
189 BINDER_STAT_THREAD,
190 BINDER_STAT_NODE,
191 BINDER_STAT_REF,
192 BINDER_STAT_DEATH,
193 BINDER_STAT_TRANSACTION,
194 BINDER_STAT_TRANSACTION_COMPLETE,
195 BINDER_STAT_COUNT
196};
197
198struct binder_stats {
199 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
200 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
201 atomic_t obj_created[BINDER_STAT_COUNT];
202 atomic_t obj_deleted[BINDER_STAT_COUNT];
203};
204
205static struct binder_stats binder_stats;
206
207static inline void binder_stats_deleted(enum binder_stat_types type)
208{
209 atomic_inc(&binder_stats.obj_deleted[type]);
210}
211
212static inline void binder_stats_created(enum binder_stat_types type)
213{
214 atomic_inc(&binder_stats.obj_created[type]);
215}
216
217struct binder_transaction_log_entry {
218 int debug_id;
219 int debug_id_done;
220 int call_type;
221 int from_proc;
222 int from_thread;
223 int target_handle;
224 int to_proc;
225 int to_thread;
226 int to_node;
227 int data_size;
228 int offsets_size;
229 int return_error_line;
230 uint32_t return_error;
231 uint32_t return_error_param;
232 const char *context_name;
233};
234struct binder_transaction_log {
235 atomic_t cur;
236 bool full;
237 struct binder_transaction_log_entry entry[32];
238};
239static struct binder_transaction_log binder_transaction_log;
240static struct binder_transaction_log binder_transaction_log_failed;
241
242static struct binder_transaction_log_entry *binder_transaction_log_add(
243 struct binder_transaction_log *log)
244{
245 struct binder_transaction_log_entry *e;
246 unsigned int cur = atomic_inc_return(&log->cur);
247
248 if (cur >= ARRAY_SIZE(log->entry))
249 log->full = true;
250 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
251 WRITE_ONCE(e->debug_id_done, 0);
252 /*
253 * write-barrier to synchronize access to e->debug_id_done.
254 * We make sure the initialized 0 value is seen before
255	 * the other fields are zeroed by memset().
256 */
257 smp_wmb();
258 memset(e, 0, sizeof(*e));
259 return e;
260}
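
/*
 * Illustrative sketch, not part of the driver: a consumer of the log pairs
 * with the smp_wmb() above by reading debug_id_done first and issuing
 * smp_rmb() before trusting the rest of the entry. The helper below is a
 * hypothetical name used only for illustration; an entry is stable once the
 * writer has copied debug_id back into debug_id_done.
 */
static inline bool binder_transaction_log_entry_stable(
	struct binder_transaction_log_entry *e)
{
	int done = READ_ONCE(e->debug_id_done);

	/* pairs with smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	return done != 0 && done == e->debug_id;
}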
261
262struct binder_context {
263 struct binder_node *binder_context_mgr_node;
264 struct mutex context_mgr_node_lock;
265
266 kuid_t binder_context_mgr_uid;
267 const char *name;
268};
269
270struct binder_device {
271 struct hlist_node hlist;
272 struct miscdevice miscdev;
273 struct binder_context context;
274};
275
276/**
277 * struct binder_work - work enqueued on a worklist
278 * @entry: node enqueued on list
279 * @type: type of work to be performed
280 *
281 * There are separate work lists for proc, thread, and node (async).
282 */
283struct binder_work {
284 struct list_head entry;
285
286 enum {
287 BINDER_WORK_TRANSACTION = 1,
288 BINDER_WORK_TRANSACTION_COMPLETE,
289 BINDER_WORK_RETURN_ERROR,
290 BINDER_WORK_NODE,
291 BINDER_WORK_DEAD_BINDER,
292 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
293 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
294 } type;
295};
296
297struct binder_error {
298 struct binder_work work;
299 uint32_t cmd;
300};
301
302/**
303 * struct binder_node - binder node bookkeeping
304 * @debug_id: unique ID for debugging
305 * (invariant after initialized)
306 * @lock: lock for node fields
307 * @work: worklist element for node work
308 * (protected by @proc->inner_lock)
309 * @rb_node: element for proc->nodes tree
310 * (protected by @proc->inner_lock)
311 * @dead_node: element for binder_dead_nodes list
312 * (protected by binder_dead_nodes_lock)
313 * @proc: binder_proc that owns this node
314 * (invariant after initialized)
315 * @refs: list of references on this node
316 * (protected by @lock)
317 * @internal_strong_refs: used to take strong references when
318 * initiating a transaction
319 * (protected by @proc->inner_lock if @proc
320 * and by @lock)
321 * @local_weak_refs: weak user refs from local process
322 * (protected by @proc->inner_lock if @proc
323 * and by @lock)
324 * @local_strong_refs: strong user refs from local process
325 * (protected by @proc->inner_lock if @proc
326 * and by @lock)
327 * @tmp_refs: temporary kernel refs
328 * (protected by @proc->inner_lock while @proc
329 * is valid, and by binder_dead_nodes_lock
330 * if @proc is NULL. During inc/dec and node release
331 * it is also protected by @lock to provide safety
332 * as the node dies and @proc becomes NULL)
333 * @ptr: userspace pointer for node
334 * (invariant, no lock needed)
335 * @cookie: userspace cookie for node
336 * (invariant, no lock needed)
337 * @has_strong_ref: userspace notified of strong ref
338 * (protected by @proc->inner_lock if @proc
339 * and by @lock)
340 * @pending_strong_ref: userspace has acked notification of strong ref
341 * (protected by @proc->inner_lock if @proc
342 * and by @lock)
343 * @has_weak_ref: userspace notified of weak ref
344 * (protected by @proc->inner_lock if @proc
345 * and by @lock)
346 * @pending_weak_ref: userspace has acked notification of weak ref
347 * (protected by @proc->inner_lock if @proc
348 * and by @lock)
349 * @has_async_transaction: async transaction to node in progress
350 * (protected by @lock)
351 * @sched_policy: minimum scheduling policy for node
352 * (invariant after initialized)
353 * @accept_fds: file descriptor operations supported for node
354 * (invariant after initialized)
355 * @min_priority: minimum scheduling priority
356 * (invariant after initialized)
357 * @inherit_rt: inherit RT scheduling policy from caller
358 * @txn_security_ctx: require sender's security context
359 * (invariant after initialized)
360 * @async_todo: list of async work items
361 * (protected by @proc->inner_lock)
362 *
363 * Bookkeeping structure for binder nodes.
364 */
365struct binder_node {
366 int debug_id;
367 spinlock_t lock;
368 struct binder_work work;
369 union {
370 struct rb_node rb_node;
371 struct hlist_node dead_node;
372 };
373 struct binder_proc *proc;
374 struct hlist_head refs;
375 int internal_strong_refs;
376 int local_weak_refs;
377 int local_strong_refs;
378 int tmp_refs;
379 binder_uintptr_t ptr;
380 binder_uintptr_t cookie;
381 struct {
382 /*
383 * bitfield elements protected by
384 * proc inner_lock
385 */
386 u8 has_strong_ref:1;
387 u8 pending_strong_ref:1;
388 u8 has_weak_ref:1;
389 u8 pending_weak_ref:1;
390 };
391 struct {
392 /*
393 * invariant after initialization
394 */
395 u8 sched_policy:2;
396 u8 inherit_rt:1;
397 u8 accept_fds:1;
398 u8 txn_security_ctx:1;
399 u8 min_priority;
400 };
401 bool has_async_transaction;
402 struct list_head async_todo;
403};
404
405struct binder_ref_death {
406 /**
407 * @work: worklist element for death notifications
408 * (protected by inner_lock of the proc that
409 * this ref belongs to)
410 */
411 struct binder_work work;
412 binder_uintptr_t cookie;
413};
414
415/**
416 * struct binder_ref_data - binder_ref counts and id
417 * @debug_id: unique ID for the ref
418 * @desc: unique userspace handle for ref
419 * @strong: strong ref count (debugging only if not locked)
420 * @weak: weak ref count (debugging only if not locked)
421 *
422 * Structure to hold ref count and ref id information. Since
423 * the actual ref can only be accessed with a lock, this structure
424 * is used to return information about the ref to callers of
425 * ref inc/dec functions.
426 */
427struct binder_ref_data {
428 int debug_id;
429 uint32_t desc;
430 int strong;
431 int weak;
432};
433
434/**
435 * struct binder_ref - struct to track references on nodes
436 * @data: binder_ref_data containing id, handle, and current refcounts
437 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
438 * @rb_node_node: node for lookup by @node in proc's rb_tree
439 * @node_entry: list entry for node->refs list in target node
440 * (protected by @node->lock)
441 * @proc: binder_proc containing ref
442 * @node: binder_node of target node. When cleaning up a
443 * ref for deletion in binder_cleanup_ref, a non-NULL
444 * @node indicates the node must be freed
445 * @death: pointer to death notification (ref_death) if requested
446 * (protected by @node->lock)
447 *
448 * Structure to track references from procA to target node (on procB). This
449 * structure is unsafe to access without holding @proc->outer_lock.
450 */
451struct binder_ref {
452 /* Lookups needed: */
453 /* node + proc => ref (transaction) */
454 /* desc + proc => ref (transaction, inc/dec ref) */
455 /* node => refs + procs (proc exit) */
456 struct binder_ref_data data;
457 struct rb_node rb_node_desc;
458 struct rb_node rb_node_node;
459 struct hlist_node node_entry;
460 struct binder_proc *proc;
461 struct binder_node *node;
462 struct binder_ref_death *death;
463};
464
465enum binder_deferred_state {
466 BINDER_DEFERRED_PUT_FILES = 0x01,
467 BINDER_DEFERRED_FLUSH = 0x02,
468 BINDER_DEFERRED_RELEASE = 0x04,
469};
470
471/**
472 * struct binder_priority - scheduler policy and priority
473 * @sched_policy: scheduler policy
474 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
475 *
476 * The binder driver supports inheriting the following scheduler policies:
477 * SCHED_NORMAL
478 * SCHED_BATCH
479 * SCHED_FIFO
480 * SCHED_RR
481 */
482struct binder_priority {
483 unsigned int sched_policy;
484 int prio;
485};
486
487/**
488 * struct binder_proc - binder process bookkeeping
489 * @proc_node: element for binder_procs list
490 * @threads: rbtree of binder_threads in this proc
491 * (protected by @inner_lock)
492 * @nodes: rbtree of binder nodes associated with
493 * this proc ordered by node->ptr
494 * (protected by @inner_lock)
495 * @refs_by_desc: rbtree of refs ordered by ref->desc
496 * (protected by @outer_lock)
497 * @refs_by_node: rbtree of refs ordered by ref->node
498 * (protected by @outer_lock)
499 * @waiting_threads: threads currently waiting for proc work
500 * (protected by @inner_lock)
501 * @pid: PID of group_leader of process
502 * (invariant after initialized)
503 * @tsk: task_struct for group_leader of process
504 * (invariant after initialized)
505 * @files: files_struct for process
506 * (protected by @files_lock)
507 * @files_lock: mutex to protect @files
508 * @deferred_work_node: element for binder_deferred_list
509 * (protected by binder_deferred_lock)
510 * @deferred_work: bitmap of deferred work to perform
511 * (protected by binder_deferred_lock)
512 * @is_dead: process is dead and awaiting free
513 * when outstanding transactions are cleaned up
514 * (protected by @inner_lock)
515 * @todo: list of work for this process
516 * (protected by @inner_lock)
517 * @stats: per-process binder statistics
518 * (atomics, no lock needed)
519 * @delivered_death: list of delivered death notifications
520 * (protected by @inner_lock)
521 * @max_threads: cap on number of binder threads
522 * (protected by @inner_lock)
523 * @requested_threads: number of binder threads requested but not
524 * yet started. In current implementation, can
525 * only be 0 or 1.
526 * (protected by @inner_lock)
527 * @requested_threads_started: number of binder threads started
528 * (protected by @inner_lock)
529 * @tmp_ref: temporary reference to indicate proc is in use
530 * (protected by @inner_lock)
531 * @default_priority: default scheduler priority
532 * (invariant after initialized)
533 * @debugfs_entry: debugfs node
534 * @alloc: binder allocator bookkeeping
535 * @context: binder_context for this proc
536 * (invariant after initialized)
537 * @inner_lock: can nest under outer_lock and/or node lock
538 * @outer_lock: no nesting under inner or node lock
539 * Lock order: 1) outer, 2) node, 3) inner
540 *
541 * Bookkeeping structure for binder processes
542 */
543struct binder_proc {
544 struct hlist_node proc_node;
545 struct rb_root threads;
546 struct rb_root nodes;
547 struct rb_root refs_by_desc;
548 struct rb_root refs_by_node;
549 struct list_head waiting_threads;
550 int pid;
551 struct task_struct *tsk;
552 struct files_struct *files;
553 struct mutex files_lock;
554 struct hlist_node deferred_work_node;
555 int deferred_work;
556 bool is_dead;
557
558 struct list_head todo;
559 struct binder_stats stats;
560 struct list_head delivered_death;
561 int max_threads;
562 int requested_threads;
563 int requested_threads_started;
564 int tmp_ref;
565 struct binder_priority default_priority;
566 struct dentry *debugfs_entry;
567 struct binder_alloc alloc;
568 struct binder_context *context;
569 spinlock_t inner_lock;
570 spinlock_t outer_lock;
571};
572
573enum {
574 BINDER_LOOPER_STATE_REGISTERED = 0x01,
575 BINDER_LOOPER_STATE_ENTERED = 0x02,
576 BINDER_LOOPER_STATE_EXITED = 0x04,
577 BINDER_LOOPER_STATE_INVALID = 0x08,
578 BINDER_LOOPER_STATE_WAITING = 0x10,
579 BINDER_LOOPER_STATE_POLL = 0x20,
580};
581
582/**
583 * struct binder_thread - binder thread bookkeeping
584 * @proc: binder process for this thread
585 * (invariant after initialization)
586 * @rb_node: element for proc->threads rbtree
587 * (protected by @proc->inner_lock)
588 * @waiting_thread_node: element for @proc->waiting_threads list
589 * (protected by @proc->inner_lock)
590 * @pid: PID for this thread
591 * (invariant after initialization)
592 * @looper: bitmap of looping state
593 * (only accessed by this thread)
594 * @looper_need_return: looping thread needs to exit driver
595 * (no lock needed)
596 * @transaction_stack: stack of in-progress transactions for this thread
597 * (protected by @proc->inner_lock)
598 * @todo: list of work to do for this thread
599 * (protected by @proc->inner_lock)
600 * @process_todo: whether work in @todo should be processed
601 * (protected by @proc->inner_lock)
602 * @return_error: transaction errors reported by this thread
603 * (only accessed by this thread)
604 * @reply_error: transaction errors reported by target thread
605 * (protected by @proc->inner_lock)
606 * @wait: wait queue for thread work
607 * @stats: per-thread statistics
608 * (atomics, no lock needed)
609 * @tmp_ref: temporary reference to indicate thread is in use
610 * (atomic since @proc->inner_lock cannot
611 * always be acquired)
612 * @is_dead: thread is dead and awaiting free
613 * when outstanding transactions are cleaned up
614 * (protected by @proc->inner_lock)
615 * @task: struct task_struct for this thread
616 *
617 * Bookkeeping structure for binder threads.
618 */
619struct binder_thread {
620 struct binder_proc *proc;
621 struct rb_node rb_node;
622 struct list_head waiting_thread_node;
623 int pid;
624 int looper; /* only modified by this thread */
625 bool looper_need_return; /* can be written by other thread */
626 struct binder_transaction *transaction_stack;
627 struct list_head todo;
628 bool process_todo;
629 struct binder_error return_error;
630 struct binder_error reply_error;
631 wait_queue_head_t wait;
632 struct binder_stats stats;
633 atomic_t tmp_ref;
634 bool is_dead;
635 struct task_struct *task;
636};
637
638struct binder_transaction {
639 int debug_id;
640 struct binder_work work;
641 struct binder_thread *from;
642 struct binder_transaction *from_parent;
643 struct binder_proc *to_proc;
644 struct binder_thread *to_thread;
645 struct binder_transaction *to_parent;
646 unsigned need_reply:1;
647 /* unsigned is_dead:1; */ /* not used at the moment */
648
649 struct binder_buffer *buffer;
650 unsigned int code;
651 unsigned int flags;
652 struct binder_priority priority;
653 struct binder_priority saved_priority;
654 bool set_priority_called;
655 kuid_t sender_euid;
656 binder_uintptr_t security_ctx;
657 /**
658 * @lock: protects @from, @to_proc, and @to_thread
659 *
660 * @from, @to_proc, and @to_thread can be set to NULL
661 * during thread teardown
662 */
663 spinlock_t lock;
664};
665
666/**
667 * binder_proc_lock() - Acquire outer lock for given binder_proc
668 * @proc: struct binder_proc to acquire
669 *
670 * Acquires proc->outer_lock. Used to protect binder_ref
671 * structures associated with the given proc.
672 */
673#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
674static void
675_binder_proc_lock(struct binder_proc *proc, int line)
676{
677 binder_debug(BINDER_DEBUG_SPINLOCKS,
678 "%s: line=%d\n", __func__, line);
679 spin_lock(&proc->outer_lock);
680}
681
682/**
683 * binder_proc_unlock() - Release spinlock for given binder_proc
684 * @proc: struct binder_proc being unlocked
685 *
686 * Release lock acquired via binder_proc_lock()
687 */
688#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
689static void
690_binder_proc_unlock(struct binder_proc *proc, int line)
691{
692 binder_debug(BINDER_DEBUG_SPINLOCKS,
693 "%s: line=%d\n", __func__, line);
694 spin_unlock(&proc->outer_lock);
695}
696
697/**
698 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
699 * @proc: struct binder_proc to acquire
700 *
701 * Acquires proc->inner_lock. Used to protect todo lists
702 */
703#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
704static void
705_binder_inner_proc_lock(struct binder_proc *proc, int line)
706{
707 binder_debug(BINDER_DEBUG_SPINLOCKS,
708 "%s: line=%d\n", __func__, line);
709 spin_lock(&proc->inner_lock);
710}
711
712/**
713 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
714 * @proc: struct binder_proc being unlocked
715 *
716 * Release lock acquired via binder_inner_proc_lock()
717 */
718#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
719static void
720_binder_inner_proc_unlock(struct binder_proc *proc, int line)
721{
722 binder_debug(BINDER_DEBUG_SPINLOCKS,
723 "%s: line=%d\n", __func__, line);
724 spin_unlock(&proc->inner_lock);
725}
726
727/**
728 * binder_node_lock() - Acquire spinlock for given binder_node
729 * @node: struct binder_node to acquire
730 *
731 * Acquires node->lock. Used to protect binder_node fields
732 */
733#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
734static void
735_binder_node_lock(struct binder_node *node, int line)
736{
737 binder_debug(BINDER_DEBUG_SPINLOCKS,
738 "%s: line=%d\n", __func__, line);
739 spin_lock(&node->lock);
740}
741
742/**
743 * binder_node_unlock() - Release spinlock for given binder_node
744 * @node: struct binder_node being unlocked
745 *
746 * Release lock acquired via binder_node_lock()
747 */
748#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
749static void
750_binder_node_unlock(struct binder_node *node, int line)
751{
752 binder_debug(BINDER_DEBUG_SPINLOCKS,
753 "%s: line=%d\n", __func__, line);
754 spin_unlock(&node->lock);
755}
756
757/**
758 * binder_node_inner_lock() - Acquire node and inner locks
759 * @node: struct binder_node to acquire
760 *
761 * Acquires node->lock. If node->proc is non-NULL, also acquires
762 * proc->inner_lock. Used to protect binder_node fields
763 */
764#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
765static void
766_binder_node_inner_lock(struct binder_node *node, int line)
767{
768 binder_debug(BINDER_DEBUG_SPINLOCKS,
769 "%s: line=%d\n", __func__, line);
770 spin_lock(&node->lock);
771 if (node->proc)
772 binder_inner_proc_lock(node->proc);
773}
774
775/**
776 * binder_node_inner_unlock() - Release node and inner locks
777 * @node: struct binder_node being unlocked
778 *
779 * Release locks acquired via binder_node_inner_lock()
780 */
781#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
782static void
783_binder_node_inner_unlock(struct binder_node *node, int line)
784{
785 struct binder_proc *proc = node->proc;
786
787 binder_debug(BINDER_DEBUG_SPINLOCKS,
788 "%s: line=%d\n", __func__, line);
789 if (proc)
790 binder_inner_proc_unlock(proc);
791 spin_unlock(&node->lock);
792}
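
/*
 * Illustrative sketch, not part of the driver: per the locking overview at
 * the top of this file, helpers suffixed "nilocked" expect node->lock and
 * proc->inner_lock to both be held, which binder_node_inner_lock() takes in
 * the documented order. The hypothetical wrapper below shows the intended
 * calling pattern around such a helper.
 */
static inline bool binder_node_has_async_work(struct binder_node *node)
{
	bool pending;

	binder_node_inner_lock(node);	/* node->lock, then proc->inner_lock */
	pending = node->has_async_transaction ||
		  !list_empty(&node->async_todo);
	binder_node_inner_unlock(node);

	return pending;
}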
793
794static bool binder_worklist_empty_ilocked(struct list_head *list)
795{
796 return list_empty(list);
797}
798
799/**
800 * binder_worklist_empty() - Check if no items on the work list
801 * @proc: binder_proc associated with list
802 * @list: list to check
803 *
804 * Return: true if there are no items on list, else false
805 */
806static bool binder_worklist_empty(struct binder_proc *proc,
807 struct list_head *list)
808{
809 bool ret;
810
811 binder_inner_proc_lock(proc);
812 ret = binder_worklist_empty_ilocked(list);
813 binder_inner_proc_unlock(proc);
814 return ret;
815}
816
817/**
818 * binder_enqueue_work_ilocked() - Add an item to the work list
819 * @work: struct binder_work to add to list
820 * @target_list: list to add work to
821 *
822 * Adds the work to the specified list. Asserts that work
823 * is not already on a list.
824 *
825 * Requires the proc->inner_lock to be held.
826 */
827static void
828binder_enqueue_work_ilocked(struct binder_work *work,
829 struct list_head *target_list)
830{
831 BUG_ON(target_list == NULL);
832 BUG_ON(work->entry.next && !list_empty(&work->entry));
833 list_add_tail(&work->entry, target_list);
834}
835
836/**
837 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
838 * @thread: thread to queue work to
839 * @work: struct binder_work to add to list
840 *
841 * Adds the work to the todo list of the thread. Doesn't set the process_todo
842 * flag, which means that (if it wasn't already set) the thread will go to
843 * sleep without handling this work when it calls read.
844 *
845 * Requires the proc->inner_lock to be held.
846 */
847static void
848binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
849 struct binder_work *work)
850{
851 binder_enqueue_work_ilocked(work, &thread->todo);
852}
853
854/**
855 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
856 * @thread: thread to queue work to
857 * @work: struct binder_work to add to list
858 *
859 * Adds the work to the todo list of the thread, and enables processing
860 * of the todo queue.
861 *
862 * Requires the proc->inner_lock to be held.
863 */
864static void
865binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
866 struct binder_work *work)
867{
868 binder_enqueue_work_ilocked(work, &thread->todo);
869 thread->process_todo = true;
870}
871
872/**
873 * binder_enqueue_thread_work() - Add an item to the thread work list
874 * @thread: thread to queue work to
875 * @work: struct binder_work to add to list
876 *
877 * Adds the work to the todo list of the thread, and enables processing
878 * of the todo queue.
879 */
880static void
881binder_enqueue_thread_work(struct binder_thread *thread,
882 struct binder_work *work)
883{
884 binder_inner_proc_lock(thread->proc);
885 binder_enqueue_thread_work_ilocked(thread, work);
886 binder_inner_proc_unlock(thread->proc);
887}
888
889static void
890binder_dequeue_work_ilocked(struct binder_work *work)
891{
892 list_del_init(&work->entry);
893}
894
895/**
896 * binder_dequeue_work() - Removes an item from the work list
897 * @proc: binder_proc associated with list
898 * @work: struct binder_work to remove from list
899 *
900 * Removes the specified work item from whatever list it is on.
901 * Can safely be called if work is not on any list.
902 */
903static void
904binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
905{
906 binder_inner_proc_lock(proc);
907 binder_dequeue_work_ilocked(work);
908 binder_inner_proc_unlock(proc);
909}
910
911static struct binder_work *binder_dequeue_work_head_ilocked(
912 struct list_head *list)
913{
914 struct binder_work *w;
915
916 w = list_first_entry_or_null(list, struct binder_work, entry);
917 if (w)
918 list_del_init(&w->entry);
919 return w;
920}
921
922/**
923 * binder_dequeue_work_head() - Dequeues the item at head of list
924 * @proc: binder_proc associated with list
925 * @list: list to dequeue head
926 *
927 * Removes the head of the list if there are items on the list
928 *
929 * Return: pointer to dequeued binder_work, NULL if list was empty
930 */
931static struct binder_work *binder_dequeue_work_head(
932 struct binder_proc *proc,
933 struct list_head *list)
934{
935 struct binder_work *w;
936
937 binder_inner_proc_lock(proc);
938 w = binder_dequeue_work_head_ilocked(list);
939 binder_inner_proc_unlock(proc);
940 return w;
941}
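
/*
 * Illustrative sketch, not part of the driver: because
 * binder_dequeue_work_head() returns NULL on an empty list, it fits
 * drain-style loops used at teardown time. The hypothetical helper below
 * shows the shape of such a loop; a real caller would dispatch on w->type
 * for every dequeued item.
 */
static inline int binder_drain_worklist(struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;
	int drained = 0;

	while ((w = binder_dequeue_work_head(proc, list)) != NULL)
		drained++;	/* a real caller would act on w->type here */

	return drained;
}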
942
943static void
944binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
945static void binder_free_thread(struct binder_thread *thread);
946static void binder_free_proc(struct binder_proc *proc);
947static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
948
949static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
950{
951 unsigned long rlim_cur;
952 unsigned long irqs;
953 int ret;
954
955 mutex_lock(&proc->files_lock);
956 if (proc->files == NULL) {
957 ret = -ESRCH;
958 goto err;
959 }
960 if (!lock_task_sighand(proc->tsk, &irqs)) {
961 ret = -EMFILE;
962 goto err;
963 }
964 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
965 unlock_task_sighand(proc->tsk, &irqs);
966
967 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
968err:
969 mutex_unlock(&proc->files_lock);
970 return ret;
971}
972
973/*
974 * copied from fd_install
975 */
976static void task_fd_install(
977 struct binder_proc *proc, unsigned int fd, struct file *file)
978{
979 mutex_lock(&proc->files_lock);
980 if (proc->files)
981 __fd_install(proc->files, fd, file);
982 mutex_unlock(&proc->files_lock);
983}
984
985/*
986 * copied from sys_close
987 */
988static long task_close_fd(struct binder_proc *proc, unsigned int fd)
989{
990 int retval;
991
992 mutex_lock(&proc->files_lock);
993 if (proc->files == NULL) {
994 retval = -ESRCH;
995 goto err;
996 }
997 retval = __close_fd(proc->files, fd);
998 /* can't restart close syscall because file table entry was cleared */
999 if (unlikely(retval == -ERESTARTSYS ||
1000 retval == -ERESTARTNOINTR ||
1001 retval == -ERESTARTNOHAND ||
1002 retval == -ERESTART_RESTARTBLOCK))
1003 retval = -EINTR;
1004err:
1005 mutex_unlock(&proc->files_lock);
1006 return retval;
1007}
1008
1009static bool binder_has_work_ilocked(struct binder_thread *thread,
1010 bool do_proc_work)
1011{
1012 return thread->process_todo ||
1013 thread->looper_need_return ||
1014 (do_proc_work &&
1015 !binder_worklist_empty_ilocked(&thread->proc->todo));
1016}
1017
1018static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1019{
1020 bool has_work;
1021
1022 binder_inner_proc_lock(thread->proc);
1023 has_work = binder_has_work_ilocked(thread, do_proc_work);
1024 binder_inner_proc_unlock(thread->proc);
1025
1026 return has_work;
1027}
1028
1029static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1030{
1031 return !thread->transaction_stack &&
1032 binder_worklist_empty_ilocked(&thread->todo) &&
1033 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1034 BINDER_LOOPER_STATE_REGISTERED));
1035}
1036
1037static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1038 bool sync)
1039{
1040 struct rb_node *n;
1041 struct binder_thread *thread;
1042
1043 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1044 thread = rb_entry(n, struct binder_thread, rb_node);
1045 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1046 binder_available_for_proc_work_ilocked(thread)) {
1047 if (sync)
1048 wake_up_interruptible_sync(&thread->wait);
1049 else
1050 wake_up_interruptible(&thread->wait);
1051 }
1052 }
1053}
1054
1055/**
1056 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1057 * @proc: process to select a thread from
1058 *
1059 * Note that calling this function moves the thread off the waiting_threads
1060 * list, so it can only be woken up by the caller of this function, or a
1061 * signal. Therefore, callers *should* always wake up the thread this function
1062 * returns.
1063 *
1064 * Return: If there's a thread currently waiting for process work,
1065 * returns that thread. Otherwise returns NULL.
1066 */
1067static struct binder_thread *
1068binder_select_thread_ilocked(struct binder_proc *proc)
1069{
1070 struct binder_thread *thread;
1071
1072 assert_spin_locked(&proc->inner_lock);
1073 thread = list_first_entry_or_null(&proc->waiting_threads,
1074 struct binder_thread,
1075 waiting_thread_node);
1076
1077 if (thread)
1078 list_del_init(&thread->waiting_thread_node);
1079
1080 return thread;
1081}
1082
1083/**
1084 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1085 * @proc: process to wake up a thread in
1086 * @thread: specific thread to wake-up (may be NULL)
1087 * @sync: whether to do a synchronous wake-up
1088 *
1089 * This function wakes up a thread in the @proc process.
1090 * The caller may provide a specific thread to wake-up in
1091 * the @thread parameter. If @thread is NULL, this function
1092 * will wake up threads that have called poll().
1093 *
1094 * Note that for this function to work as expected, callers
1095 * should first call binder_select_thread() to find a thread
1096 * to handle the work (if they don't have a thread already),
1097 * and pass the result into the @thread parameter.
1098 */
1099static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1100 struct binder_thread *thread,
1101 bool sync)
1102{
1103 assert_spin_locked(&proc->inner_lock);
1104
1105 if (thread) {
1106 if (sync)
1107 wake_up_interruptible_sync(&thread->wait);
1108 else
1109 wake_up_interruptible(&thread->wait);
1110 return;
1111 }
1112
1113 /* Didn't find a thread waiting for proc work; this can happen
1114 * in two scenarios:
1115 * 1. All threads are busy handling transactions
1116 * In that case, one of those threads should call back into
1117 * the kernel driver soon and pick up this work.
1118 * 2. Threads are using the (e)poll interface, in which case
1119 * they may be blocked on the waitqueue without having been
1120 * added to waiting_threads. For this case, we just iterate
1121 * over all threads not handling transaction work, and
1122 * wake them all up. We wake all because we don't know whether
1123 * a thread that called into (e)poll is handling non-binder
1124 * work currently.
1125 */
1126 binder_wakeup_poll_threads_ilocked(proc, sync);
1127}
1128
1129static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1130{
1131 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1132
1133 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1134}
1135
1136static bool is_rt_policy(int policy)
1137{
1138 return policy == SCHED_FIFO || policy == SCHED_RR;
1139}
1140
1141static bool is_fair_policy(int policy)
1142{
1143 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1144}
1145
1146static bool binder_supported_policy(int policy)
1147{
1148 return is_fair_policy(policy) || is_rt_policy(policy);
1149}
1150
1151static int to_userspace_prio(int policy, int kernel_priority)
1152{
1153 if (is_fair_policy(policy))
1154 return PRIO_TO_NICE(kernel_priority);
1155 else
1156 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1157}
1158
1159static int to_kernel_prio(int policy, int user_priority)
1160{
1161 if (is_fair_policy(policy))
1162 return NICE_TO_PRIO(user_priority);
1163 else
1164 return MAX_USER_RT_PRIO - 1 - user_priority;
1165}
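
/*
 * Worked example (illustration only, assuming the kernel's usual priority
 * layout of MAX_RT_PRIO == 100 and a 40-level nice range):
 *
 *	to_kernel_prio(SCHED_NORMAL, 0)   == NICE_TO_PRIO(0)	== 120
 *	to_kernel_prio(SCHED_FIFO, 10)    == 99 - 10		== 89
 *	to_userspace_prio(SCHED_FIFO, 89) == 99 - 89		== 10
 *
 * Lower kernel values mean higher priority, so binder_transaction_priority()
 * below treats a numerically smaller prio as the more urgent one.
 */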
1166
1167static void binder_do_set_priority(struct task_struct *task,
1168 struct binder_priority desired,
1169 bool verify)
1170{
1171 int priority; /* user-space prio value */
1172 bool has_cap_nice;
1173 unsigned int policy = desired.sched_policy;
1174
1175 if (task->policy == policy && task->normal_prio == desired.prio)
1176 return;
1177
1178 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1179
1180 priority = to_userspace_prio(policy, desired.prio);
1181
1182 if (verify && is_rt_policy(policy) && !has_cap_nice) {
1183 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1184
1185 if (max_rtprio == 0) {
1186 policy = SCHED_NORMAL;
1187 priority = MIN_NICE;
1188 } else if (priority > max_rtprio) {
1189 priority = max_rtprio;
1190 }
1191 }
1192
1193 if (verify && is_fair_policy(policy) && !has_cap_nice) {
1194 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1195
1196 if (min_nice > MAX_NICE) {
1197 binder_user_error("%d RLIMIT_NICE not set\n",
1198 task->pid);
1199 return;
1200 } else if (priority < min_nice) {
1201 priority = min_nice;
1202 }
1203 }
1204
1205 if (policy != desired.sched_policy ||
1206 to_kernel_prio(policy, priority) != desired.prio)
1207 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1208 "%d: priority %d not allowed, using %d instead\n",
1209 task->pid, desired.prio,
1210 to_kernel_prio(policy, priority));
1211
1212 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1213 to_kernel_prio(policy, priority),
1214 desired.prio);
1215
1216 /* Set the actual priority */
1217 if (task->policy != policy || is_rt_policy(policy)) {
1218 struct sched_param params;
1219
1220 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1221
1222 sched_setscheduler_nocheck(task,
1223 policy | SCHED_RESET_ON_FORK,
1224 &params);
1225 }
1226 if (is_fair_policy(policy))
1227 set_user_nice(task, priority);
1228}
1229
1230static void binder_set_priority(struct task_struct *task,
1231 struct binder_priority desired)
1232{
1233 binder_do_set_priority(task, desired, /* verify = */ true);
1234}
1235
1236static void binder_restore_priority(struct task_struct *task,
1237 struct binder_priority desired)
1238{
1239 binder_do_set_priority(task, desired, /* verify = */ false);
1240}
1241
1242static void binder_transaction_priority(struct task_struct *task,
1243 struct binder_transaction *t,
1244 struct binder_priority node_prio,
1245 bool inherit_rt)
1246{
1247 struct binder_priority desired_prio = t->priority;
1248
1249 if (t->set_priority_called)
1250 return;
1251
1252 t->set_priority_called = true;
1253 t->saved_priority.sched_policy = task->policy;
1254 t->saved_priority.prio = task->normal_prio;
1255
1256 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1257 desired_prio.prio = NICE_TO_PRIO(0);
1258 desired_prio.sched_policy = SCHED_NORMAL;
1259 }
1260
1261 if (node_prio.prio < t->priority.prio ||
1262 (node_prio.prio == t->priority.prio &&
1263 node_prio.sched_policy == SCHED_FIFO)) {
1264 /*
1265 * In case the minimum priority on the node is
1266 * higher (lower value), use that priority. If
1267 * the priority is the same, but the node uses
1268 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1269 * run unbounded, unlike SCHED_RR.
1270 */
1271 desired_prio = node_prio;
1272 }
1273
1274 binder_set_priority(task, desired_prio);
1275}
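
/*
 * Worked example (illustration only): if the caller runs at SCHED_NORMAL
 * prio 120 and the node's minimum is SCHED_NORMAL prio 110, the transaction
 * is set to 110, the numerically lower and therefore higher priority,
 * subject to the rlimit checks in binder_do_set_priority(). If both sides
 * ask for prio 89 but the node wants SCHED_FIFO while the transaction
 * carries SCHED_RR, SCHED_FIFO wins, as described in the comment above.
 */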
1276
1277static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1278 binder_uintptr_t ptr)
1279{
1280 struct rb_node *n = proc->nodes.rb_node;
1281 struct binder_node *node;
1282
1283 assert_spin_locked(&proc->inner_lock);
1284
1285 while (n) {
1286 node = rb_entry(n, struct binder_node, rb_node);
1287
1288 if (ptr < node->ptr)
1289 n = n->rb_left;
1290 else if (ptr > node->ptr)
1291 n = n->rb_right;
1292 else {
1293 /*
1294 * take an implicit weak reference
1295 * to ensure node stays alive until
1296 * call to binder_put_node()
1297 */
1298 binder_inc_node_tmpref_ilocked(node);
1299 return node;
1300 }
1301 }
1302 return NULL;
1303}
1304
1305static struct binder_node *binder_get_node(struct binder_proc *proc,
1306 binder_uintptr_t ptr)
1307{
1308 struct binder_node *node;
1309
1310 binder_inner_proc_lock(proc);
1311 node = binder_get_node_ilocked(proc, ptr);
1312 binder_inner_proc_unlock(proc);
1313 return node;
1314}
1315
1316static struct binder_node *binder_init_node_ilocked(
1317 struct binder_proc *proc,
1318 struct binder_node *new_node,
1319 struct flat_binder_object *fp)
1320{
1321 struct rb_node **p = &proc->nodes.rb_node;
1322 struct rb_node *parent = NULL;
1323 struct binder_node *node;
1324 binder_uintptr_t ptr = fp ? fp->binder : 0;
1325 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1326 __u32 flags = fp ? fp->flags : 0;
1327 s8 priority;
1328
1329 assert_spin_locked(&proc->inner_lock);
1330
1331 while (*p) {
1332
1333 parent = *p;
1334 node = rb_entry(parent, struct binder_node, rb_node);
1335
1336 if (ptr < node->ptr)
1337 p = &(*p)->rb_left;
1338 else if (ptr > node->ptr)
1339 p = &(*p)->rb_right;
1340 else {
1341 /*
1342 * A matching node is already in
1343 * the rb tree. Abandon the init
1344 * and return it.
1345 */
1346 binder_inc_node_tmpref_ilocked(node);
1347 return node;
1348 }
1349 }
1350 node = new_node;
1351 binder_stats_created(BINDER_STAT_NODE);
1352 node->tmp_refs++;
1353 rb_link_node(&node->rb_node, parent, p);
1354 rb_insert_color(&node->rb_node, &proc->nodes);
1355 node->debug_id = atomic_inc_return(&binder_last_id);
1356 node->proc = proc;
1357 node->ptr = ptr;
1358 node->cookie = cookie;
1359 node->work.type = BINDER_WORK_NODE;
1360 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1361 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1362 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1363 node->min_priority = to_kernel_prio(node->sched_policy, priority);
1364 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1365 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
1366 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1367 spin_lock_init(&node->lock);
1368 INIT_LIST_HEAD(&node->work.entry);
1369 INIT_LIST_HEAD(&node->async_todo);
1370 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1371 "%d:%d node %d u%016llx c%016llx created\n",
1372 proc->pid, current->pid, node->debug_id,
1373 (u64)node->ptr, (u64)node->cookie);
1374
1375 return node;
1376}
1377
1378static struct binder_node *binder_new_node(struct binder_proc *proc,
1379 struct flat_binder_object *fp)
1380{
1381 struct binder_node *node;
1382 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1383
1384 if (!new_node)
1385 return NULL;
1386 binder_inner_proc_lock(proc);
1387 node = binder_init_node_ilocked(proc, new_node, fp);
1388 binder_inner_proc_unlock(proc);
1389 if (node != new_node)
1390 /*
1391 * The node was already added by another thread
1392 */
1393 kfree(new_node);
1394
1395 return node;
1396}
1397
1398static void binder_free_node(struct binder_node *node)
1399{
1400 kfree(node);
1401 binder_stats_deleted(BINDER_STAT_NODE);
1402}
1403
1404static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1405 int internal,
1406 struct list_head *target_list)
1407{
1408 struct binder_proc *proc = node->proc;
1409
1410 assert_spin_locked(&node->lock);
1411 if (proc)
1412 assert_spin_locked(&proc->inner_lock);
1413 if (strong) {
1414 if (internal) {
1415 if (target_list == NULL &&
1416 node->internal_strong_refs == 0 &&
1417 !(node->proc &&
1418 node == node->proc->context->binder_context_mgr_node &&
1419 node->has_strong_ref)) {
1420 pr_err("invalid inc strong node for %d\n",
1421 node->debug_id);
1422 return -EINVAL;
1423 }
1424 node->internal_strong_refs++;
1425 } else
1426 node->local_strong_refs++;
1427 if (!node->has_strong_ref && target_list) {
1428 binder_dequeue_work_ilocked(&node->work);
1429 /*
1430 * Note: this function is the only place where we queue
1431 * directly to a thread->todo without using the
1432 * corresponding binder_enqueue_thread_work() helper
1433 * functions; in this case it's ok to not set the
1434 * process_todo flag, since we know this node work will
1435 * always be followed by other work that starts queue
1436 * processing: in case of synchronous transactions, a
1437 * BR_REPLY or BR_ERROR; in case of oneway
1438 * transactions, a BR_TRANSACTION_COMPLETE.
1439 */
1440 binder_enqueue_work_ilocked(&node->work, target_list);
1441 }
1442 } else {
1443 if (!internal)
1444 node->local_weak_refs++;
1445 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1446 if (target_list == NULL) {
1447 pr_err("invalid inc weak node for %d\n",
1448 node->debug_id);
1449 return -EINVAL;
1450 }
1451 /*
1452 * See comment above
1453 */
1454 binder_enqueue_work_ilocked(&node->work, target_list);
1455 }
1456 }
1457 return 0;
1458}
1459
1460static int binder_inc_node(struct binder_node *node, int strong, int internal,
1461 struct list_head *target_list)
1462{
1463 int ret;
1464
1465 binder_node_inner_lock(node);
1466 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1467 binder_node_inner_unlock(node);
1468
1469 return ret;
1470}
1471
1472static bool binder_dec_node_nilocked(struct binder_node *node,
1473 int strong, int internal)
1474{
1475 struct binder_proc *proc = node->proc;
1476
1477 assert_spin_locked(&node->lock);
1478 if (proc)
1479 assert_spin_locked(&proc->inner_lock);
1480 if (strong) {
1481 if (internal)
1482 node->internal_strong_refs--;
1483 else
1484 node->local_strong_refs--;
1485 if (node->local_strong_refs || node->internal_strong_refs)
1486 return false;
1487 } else {
1488 if (!internal)
1489 node->local_weak_refs--;
1490 if (node->local_weak_refs || node->tmp_refs ||
1491 !hlist_empty(&node->refs))
1492 return false;
1493 }
1494
1495 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1496 if (list_empty(&node->work.entry)) {
1497 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1498 binder_wakeup_proc_ilocked(proc);
1499 }
1500 } else {
1501 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1502 !node->local_weak_refs && !node->tmp_refs) {
1503 if (proc) {
1504 binder_dequeue_work_ilocked(&node->work);
1505 rb_erase(&node->rb_node, &proc->nodes);
1506 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1507 "refless node %d deleted\n",
1508 node->debug_id);
1509 } else {
1510 BUG_ON(!list_empty(&node->work.entry));
1511 spin_lock(&binder_dead_nodes_lock);
1512 /*
1513 * tmp_refs could have changed so
1514 * check it again
1515 */
1516 if (node->tmp_refs) {
1517 spin_unlock(&binder_dead_nodes_lock);
1518 return false;
1519 }
1520 hlist_del(&node->dead_node);
1521 spin_unlock(&binder_dead_nodes_lock);
1522 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1523 "dead node %d deleted\n",
1524 node->debug_id);
1525 }
1526 return true;
1527 }
1528 }
1529 return false;
1530}
1531
1532static void binder_dec_node(struct binder_node *node, int strong, int internal)
1533{
1534 bool free_node;
1535
1536 binder_node_inner_lock(node);
1537 free_node = binder_dec_node_nilocked(node, strong, internal);
1538 binder_node_inner_unlock(node);
1539 if (free_node)
1540 binder_free_node(node);
1541}
1542
1543static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1544{
1545 /*
1546 * No call to binder_inc_node() is needed since we
1547 * don't need to inform userspace of any changes to
1548 * tmp_refs
1549 */
1550 node->tmp_refs++;
1551}
1552
1553/**
1554 * binder_inc_node_tmpref() - take a temporary reference on node
1555 * @node: node to reference
1556 *
1557 * Take reference on node to prevent the node from being freed
1558 * while referenced only by a local variable. The inner lock is
1559 * needed to serialize with the node work on the queue (which
1560 * isn't needed after the node is dead). If the node is dead
1561 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1562 * node->tmp_refs against dead-node-only cases where the node
1563 * lock cannot be acquired (eg traversing the dead node list to
1564 * print nodes)
1565 */
1566static void binder_inc_node_tmpref(struct binder_node *node)
1567{
1568 binder_node_lock(node);
1569 if (node->proc)
1570 binder_inner_proc_lock(node->proc);
1571 else
1572 spin_lock(&binder_dead_nodes_lock);
1573 binder_inc_node_tmpref_ilocked(node);
1574 if (node->proc)
1575 binder_inner_proc_unlock(node->proc);
1576 else
1577 spin_unlock(&binder_dead_nodes_lock);
1578 binder_node_unlock(node);
1579}
1580
1581/**
1582 * binder_dec_node_tmpref() - remove a temporary reference on node
1583 * @node: node to reference
1584 *
1585 * Release temporary reference on node taken via binder_inc_node_tmpref()
1586 */
1587static void binder_dec_node_tmpref(struct binder_node *node)
1588{
1589 bool free_node;
1590
1591 binder_node_inner_lock(node);
1592 if (!node->proc)
1593 spin_lock(&binder_dead_nodes_lock);
1594 node->tmp_refs--;
1595 BUG_ON(node->tmp_refs < 0);
1596 if (!node->proc)
1597 spin_unlock(&binder_dead_nodes_lock);
1598 /*
1599 * Call binder_dec_node() to check if all refcounts are 0
1600 * and cleanup is needed. Calling with strong=0 and internal=1
1601 * causes no actual reference to be released in binder_dec_node().
1602 * If that changes, a change is needed here too.
1603 */
1604 free_node = binder_dec_node_nilocked(node, 0, 1);
1605 binder_node_inner_unlock(node);
1606 if (free_node)
1607 binder_free_node(node);
1608}
1609
1610static void binder_put_node(struct binder_node *node)
1611{
1612 binder_dec_node_tmpref(node);
1613}
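
/*
 * Illustrative sketch, not part of the driver: lookups such as
 * binder_get_node() return a node with a temporary reference already taken,
 * so every lookup must be paired with binder_put_node() once the caller is
 * done with the pointer. binder_node_exists() is a hypothetical name used
 * only for illustration.
 */
static inline bool binder_node_exists(struct binder_proc *proc,
				      binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return false;

	/* the tmpref from the lookup keeps the node alive up to this point */
	binder_put_node(node);

	return true;
}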
1614
1615static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1616 u32 desc, bool need_strong_ref)
1617{
1618 struct rb_node *n = proc->refs_by_desc.rb_node;
1619 struct binder_ref *ref;
1620
1621 while (n) {
1622 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1623
1624 if (desc < ref->data.desc) {
1625 n = n->rb_left;
1626 } else if (desc > ref->data.desc) {
1627 n = n->rb_right;
1628 } else if (need_strong_ref && !ref->data.strong) {
1629 binder_user_error("tried to use weak ref as strong ref\n");
1630 return NULL;
1631 } else {
1632 return ref;
1633 }
1634 }
1635 return NULL;
1636}
1637
1638/**
1639 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1640 * @proc: binder_proc that owns the ref
1641 * @node: binder_node of target
1642 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1643 *
1644 * Look up the ref for the given node and return it if it exists
1645 *
1646 * If it doesn't exist and the caller provides a newly allocated
1647 * ref, initialize the fields of the newly allocated ref and insert
1648 * into the given proc rb_trees and node refs list.
1649 *
1650 * Return: the ref for node. It is possible that another thread
1651 * allocated/initialized the ref first in which case the
1652 * returned ref would be different than the passed-in
1653 * new_ref. new_ref must be kfree'd by the caller in
1654 * this case.
1655 */
1656static struct binder_ref *binder_get_ref_for_node_olocked(
1657 struct binder_proc *proc,
1658 struct binder_node *node,
1659 struct binder_ref *new_ref)
1660{
1661 struct binder_context *context = proc->context;
1662 struct rb_node **p = &proc->refs_by_node.rb_node;
1663 struct rb_node *parent = NULL;
1664 struct binder_ref *ref;
1665 struct rb_node *n;
1666
1667 while (*p) {
1668 parent = *p;
1669 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1670
1671 if (node < ref->node)
1672 p = &(*p)->rb_left;
1673 else if (node > ref->node)
1674 p = &(*p)->rb_right;
1675 else
1676 return ref;
1677 }
1678 if (!new_ref)
1679 return NULL;
1680
1681 binder_stats_created(BINDER_STAT_REF);
1682 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1683 new_ref->proc = proc;
1684 new_ref->node = node;
1685 rb_link_node(&new_ref->rb_node_node, parent, p);
1686 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1687
1688 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1689 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1690 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1691 if (ref->data.desc > new_ref->data.desc)
1692 break;
1693 new_ref->data.desc = ref->data.desc + 1;
1694 }
1695
1696 p = &proc->refs_by_desc.rb_node;
1697 while (*p) {
1698 parent = *p;
1699 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1700
1701 if (new_ref->data.desc < ref->data.desc)
1702 p = &(*p)->rb_left;
1703 else if (new_ref->data.desc > ref->data.desc)
1704 p = &(*p)->rb_right;
1705 else
1706 BUG();
1707 }
1708 rb_link_node(&new_ref->rb_node_desc, parent, p);
1709 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1710
1711 binder_node_lock(node);
1712 hlist_add_head(&new_ref->node_entry, &node->refs);
1713
1714 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1715 "%d new ref %d desc %d for node %d\n",
1716 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1717 node->debug_id);
1718 binder_node_unlock(node);
1719 return new_ref;
1720}
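
/*
 * Illustrative sketch, not part of the driver: the intended calling pattern
 * for binder_get_ref_for_node_olocked() is to allocate a candidate ref
 * outside the lock, do the lookup/insert under proc->outer_lock, and free
 * the candidate if another thread raced in first, as the kerneldoc above
 * notes. binder_get_or_create_ref() is a hypothetical name used only for
 * illustration.
 */
static inline struct binder_ref *binder_get_or_create_ref(
	struct binder_proc *proc, struct binder_node *node)
{
	struct binder_ref *new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	binder_proc_unlock(proc);

	if (new_ref && ref != new_ref)
		kfree(new_ref);		/* lost the race; candidate unused */

	return ref;
}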
1721
1722static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1723{
1724 bool delete_node = false;
1725
1726 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1727 "%d delete ref %d desc %d for node %d\n",
1728 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1729 ref->node->debug_id);
1730
1731 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1732 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1733
1734 binder_node_inner_lock(ref->node);
1735 if (ref->data.strong)
1736 binder_dec_node_nilocked(ref->node, 1, 1);
1737
1738 hlist_del(&ref->node_entry);
1739 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1740 binder_node_inner_unlock(ref->node);
1741 /*
1742 * Clear ref->node unless we want the caller to free the node
1743 */
1744 if (!delete_node) {
1745 /*
1746 * The caller uses ref->node to determine
1747 * whether the node needs to be freed. Clear
1748 * it since the node is still alive.
1749 */
1750 ref->node = NULL;
1751 }
1752
1753 if (ref->death) {
1754 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1755 "%d delete ref %d desc %d has death notification\n",
1756 ref->proc->pid, ref->data.debug_id,
1757 ref->data.desc);
1758 binder_dequeue_work(ref->proc, &ref->death->work);
1759 binder_stats_deleted(BINDER_STAT_DEATH);
1760 }
1761 binder_stats_deleted(BINDER_STAT_REF);
1762}
1763
1764/**
1765 * binder_inc_ref_olocked() - increment the ref for given handle
1766 * @ref: ref to be incremented
1767 * @strong: if true, strong increment, else weak
1768 * @target_list: list to queue node work on
1769 *
1770 * Increment the ref. @ref->proc->outer_lock must be held on entry
1771 *
1772 * Return: 0, if successful, else errno
1773 */
1774static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1775 struct list_head *target_list)
1776{
1777 int ret;
1778
1779 if (strong) {
1780 if (ref->data.strong == 0) {
1781 ret = binder_inc_node(ref->node, 1, 1, target_list);
1782 if (ret)
1783 return ret;
1784 }
1785 ref->data.strong++;
1786 } else {
1787 if (ref->data.weak == 0) {
1788 ret = binder_inc_node(ref->node, 0, 1, target_list);
1789 if (ret)
1790 return ret;
1791 }
1792 ref->data.weak++;
1793 }
1794 return 0;
1795}
1796
1797/**
1798 * binder_dec_ref_olocked() - dec the ref for given handle
1799 * @ref: ref to be decremented
1800 * @strong: if true, strong decrement, else weak
1801 *
1802 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1803 *
1804 * Return: true if ref is cleaned up and ready to be freed
1805 */
1806static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1807{
1808 if (strong) {
1809 if (ref->data.strong == 0) {
1810 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1811 ref->proc->pid, ref->data.debug_id,
1812 ref->data.desc, ref->data.strong,
1813 ref->data.weak);
1814 return false;
1815 }
1816 ref->data.strong--;
1817 if (ref->data.strong == 0)
1818 binder_dec_node(ref->node, strong, 1);
1819 } else {
1820 if (ref->data.weak == 0) {
1821 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1822 ref->proc->pid, ref->data.debug_id,
1823 ref->data.desc, ref->data.strong,
1824 ref->data.weak);
1825 return false;
1826 }
1827 ref->data.weak--;
1828 }
1829 if (ref->data.strong == 0 && ref->data.weak == 0) {
1830 binder_cleanup_ref_olocked(ref);
1831 return true;
1832 }
1833 return false;
1834}
1835
1836/**
1837 * binder_get_node_from_ref() - get the node from the given proc/desc
1838 * @proc: proc containing the ref
1839 * @desc: the handle associated with the ref
1840 * @need_strong_ref: if true, only return node if ref is strong
1841 * @rdata: the id/refcount data for the ref
1842 *
1843 * Given a proc and ref handle, return the associated binder_node
1844 *
1845 * Return: a binder_node, or NULL if the ref is not found, or if only a weak ref exists when a strong ref is required
1846 */
1847static struct binder_node *binder_get_node_from_ref(
1848 struct binder_proc *proc,
1849 u32 desc, bool need_strong_ref,
1850 struct binder_ref_data *rdata)
1851{
1852 struct binder_node *node;
1853 struct binder_ref *ref;
1854
1855 binder_proc_lock(proc);
1856 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1857 if (!ref)
1858 goto err_no_ref;
1859 node = ref->node;
1860 /*
1861 * Take an implicit reference on the node to ensure
1862 * it stays alive until the call to binder_put_node()
1863 */
1864 binder_inc_node_tmpref(node);
1865 if (rdata)
1866 *rdata = ref->data;
1867 binder_proc_unlock(proc);
1868
1869 return node;
1870
1871err_no_ref:
1872 binder_proc_unlock(proc);
1873 return NULL;
1874}
1875
1876/**
1877 * binder_free_ref() - free the binder_ref
1878 * @ref: ref to free
1879 *
1880 * Free the binder_ref. Free the binder_node indicated by ref->node
1881 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1882 */
1883static void binder_free_ref(struct binder_ref *ref)
1884{
1885 if (ref->node)
1886 binder_free_node(ref->node);
1887 kfree(ref->death);
1888 kfree(ref);
1889}
1890
1891/**
1892 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1893 * @proc: proc containing the ref
1894 * @desc: the handle associated with the ref
1895 * @increment: true=inc reference, false=dec reference
1896 * @strong: true=strong reference, false=weak reference
1897 * @rdata: the id/refcount data for the ref
1898 *
1899 * Given a proc and ref handle, increment or decrement the ref
1900 * according to "increment" arg.
1901 *
1902 * Return: 0 if successful, else errno
1903 */
1904static int binder_update_ref_for_handle(struct binder_proc *proc,
1905 uint32_t desc, bool increment, bool strong,
1906 struct binder_ref_data *rdata)
1907{
1908 int ret = 0;
1909 struct binder_ref *ref;
1910 bool delete_ref = false;
1911
1912 binder_proc_lock(proc);
1913 ref = binder_get_ref_olocked(proc, desc, strong);
1914 if (!ref) {
1915 ret = -EINVAL;
1916 goto err_no_ref;
1917 }
1918 if (increment)
1919 ret = binder_inc_ref_olocked(ref, strong, NULL);
1920 else
1921 delete_ref = binder_dec_ref_olocked(ref, strong);
1922
1923 if (rdata)
1924 *rdata = ref->data;
1925 binder_proc_unlock(proc);
1926
1927 if (delete_ref)
1928 binder_free_ref(ref);
1929 return ret;
1930
1931err_no_ref:
1932 binder_proc_unlock(proc);
1933 return ret;
1934}
1935
1936/**
1937 * binder_dec_ref_for_handle() - dec the ref for given handle
1938 * @proc: proc containing the ref
1939 * @desc: the handle associated with the ref
1940 * @strong: true=strong reference, false=weak reference
1941 * @rdata: the id/refcount data for the ref
1942 *
1943 * Just calls binder_update_ref_for_handle() to decrement the ref.
1944 *
1945 * Return: 0 if successful, else errno
1946 */
1947static int binder_dec_ref_for_handle(struct binder_proc *proc,
1948 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1949{
1950 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1951}
1952
1953
1954/**
1955 * binder_inc_ref_for_node() - increment the ref for given proc/node
1956 * @proc: proc containing the ref
1957 * @node: target node
1958 * @strong: true=strong reference, false=weak reference
1959 * @target_list: worklist to use if node is incremented
1960 * @rdata: the id/refcount data for the ref
1961 *
1962 * Given a proc and node, increment the ref. Create the ref if it
1963 * doesn't already exist
1964 *
1965 * Return: 0 if successful, else errno
1966 */
1967static int binder_inc_ref_for_node(struct binder_proc *proc,
1968 struct binder_node *node,
1969 bool strong,
1970 struct list_head *target_list,
1971 struct binder_ref_data *rdata)
1972{
1973 struct binder_ref *ref;
1974 struct binder_ref *new_ref = NULL;
1975 int ret = 0;
1976
1977 binder_proc_lock(proc);
1978 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1979 if (!ref) {
1980 binder_proc_unlock(proc);
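		/*
		 * The proc lock is a spinlock, so it must be dropped before the
		 * GFP_KERNEL allocation below, which may sleep. The ref is then
		 * looked up again under the lock, since another thread may have
		 * created it in the meantime.
		 */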
1981 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1982 if (!new_ref)
1983 return -ENOMEM;
1984 binder_proc_lock(proc);
1985 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1986 }
1987 ret = binder_inc_ref_olocked(ref, strong, target_list);
1988 *rdata = ref->data;
1989 binder_proc_unlock(proc);
1990 if (new_ref && ref != new_ref)
1991 /*
1992 * Another thread created the ref first so
1993 * free the one we allocated
1994 */
1995 kfree(new_ref);
1996 return ret;
1997}
1998
1999static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
2000 struct binder_transaction *t)
2001{
2002 BUG_ON(!target_thread);
2003 assert_spin_locked(&target_thread->proc->inner_lock);
2004 BUG_ON(target_thread->transaction_stack != t);
2005 BUG_ON(target_thread->transaction_stack->from != target_thread);
2006 target_thread->transaction_stack =
2007 target_thread->transaction_stack->from_parent;
2008 t->from = NULL;
2009}
2010
2011/**
2012 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2013 * @thread: thread to decrement
2014 *
2015 * A thread needs to be kept alive while being used to create or
2016 * handle a transaction. binder_get_txn_from() is used to safely
2017 * extract t->from from a binder_transaction and keep the thread
2018 * indicated by t->from from being freed. When done with that
2019 * binder_thread, this function is called to decrement the
2020 * tmp_ref and free if appropriate (thread has been released
2021 * and no transaction being processed by the driver)
2022 */
2023static void binder_thread_dec_tmpref(struct binder_thread *thread)
2024{
2025 /*
2026 * atomic is used so the counter can be adjusted without holding the
2027 * inner lock; the is_dead/zero check below is done under the inner lock
2028 */
2029 binder_inner_proc_lock(thread->proc);
2030 atomic_dec(&thread->tmp_ref);
2031 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
2032 binder_inner_proc_unlock(thread->proc);
2033 binder_free_thread(thread);
2034 return;
2035 }
2036 binder_inner_proc_unlock(thread->proc);
2037}
2038
2039/**
2040 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2041 * @proc: proc to decrement
2042 *
2043 * A binder_proc needs to be kept alive while being used to create or
2044 * handle a transaction. proc->tmp_ref is incremented when
2045 * creating a new transaction or while the binder_proc is in use
2046 * by threads that are being released. When done with the binder_proc,
2047 * this function is called to decrement the counter and free the
2048 * proc if appropriate (proc has been released, all threads have
2049 * been released, and it is not currently in use to process a transaction).
2050 */
2051static void binder_proc_dec_tmpref(struct binder_proc *proc)
2052{
2053 binder_inner_proc_lock(proc);
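	/*
	 * Unlike thread->tmp_ref, proc->tmp_ref is a plain integer protected
	 * by the inner lock, so it is only modified with that lock held.
	 */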
2054 proc->tmp_ref--;
2055 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2056 !proc->tmp_ref) {
2057 binder_inner_proc_unlock(proc);
2058 binder_free_proc(proc);
2059 return;
2060 }
2061 binder_inner_proc_unlock(proc);
2062}
2063
2064/**
2065 * binder_get_txn_from() - safely extract the "from" thread in transaction
2066 * @t: binder transaction for t->from
2067 *
2068 * Atomically return the "from" thread and increment the tmp_ref
2069 * count for the thread to ensure it stays alive until
2070 * binder_thread_dec_tmpref() is called.
2071 *
2072 * Return: the value of t->from
2073 */
2074static struct binder_thread *binder_get_txn_from(
2075 struct binder_transaction *t)
2076{
2077 struct binder_thread *from;
2078
2079 spin_lock(&t->lock);
2080 from = t->from;
2081 if (from)
2082 atomic_inc(&from->tmp_ref);
2083 spin_unlock(&t->lock);
2084 return from;
2085}
2086
2087/**
2088 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2089 * @t: binder transaction for t->from
2090 *
2091 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2092 * to guarantee that the thread cannot be released while operating on it.
2093 * The caller must call binder_inner_proc_unlock() to release the inner lock
2094 * as well as call binder_thread_dec_tmpref() to release the reference.
2095 *
2096 * Return: the value of t->from
2097 */
2098static struct binder_thread *binder_get_txn_from_and_acq_inner(
2099 struct binder_transaction *t)
2100{
2101 struct binder_thread *from;
2102
2103 from = binder_get_txn_from(t);
2104 if (!from)
2105 return NULL;
2106 binder_inner_proc_lock(from->proc);
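	/*
	 * Re-check t->from under the inner lock: if the transaction was
	 * popped concurrently, t->from is now NULL, so drop the tmpref
	 * taken above and report no thread.
	 */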
2107 if (t->from) {
2108 BUG_ON(from != t->from);
2109 return from;
2110 }
2111 binder_inner_proc_unlock(from->proc);
2112 binder_thread_dec_tmpref(from);
2113 return NULL;
2114}
2115
2116static void binder_free_transaction(struct binder_transaction *t)
2117{
2118 struct binder_proc *target_proc = t->to_proc;
2119
2120 if (target_proc) {
2121 binder_inner_proc_lock(target_proc);
2122 if (t->buffer)
2123 t->buffer->transaction = NULL;
2124 binder_inner_proc_unlock(target_proc);
2125 }
2126 /*
2127 * If the transaction has no target_proc, then
2128 * t->buffer->transaction has already been cleared.
2129 */
2130 kfree(t);
2131 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2132}
2133
2134static void binder_send_failed_reply(struct binder_transaction *t,
2135 uint32_t error_code)
2136{
2137 struct binder_thread *target_thread;
2138 struct binder_transaction *next;
2139
2140 BUG_ON(t->flags & TF_ONE_WAY);
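	/*
	 * Walk back up the transaction stack via from_parent, failing each
	 * nested transaction, until a live sender thread is found to deliver
	 * the error to, or the root of the stack is reached.
	 */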
2141 while (1) {
2142 target_thread = binder_get_txn_from_and_acq_inner(t);
2143 if (target_thread) {
2144 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2145 "send failed reply for transaction %d to %d:%d\n",
2146 t->debug_id,
2147 target_thread->proc->pid,
2148 target_thread->pid);
2149
2150 binder_pop_transaction_ilocked(target_thread, t);
2151 if (target_thread->reply_error.cmd == BR_OK) {
2152 target_thread->reply_error.cmd = error_code;
2153 binder_enqueue_thread_work_ilocked(
2154 target_thread,
2155 &target_thread->reply_error.work);
2156 wake_up_interruptible(&target_thread->wait);
2157 } else {
2158 /*
2159 * Cannot get here for normal operation, but
2160 * we can if multiple synchronous transactions
2161 * are sent without blocking for responses.
2162 * Just ignore the 2nd error in this case.
2163 */
2164 pr_warn("Unexpected reply error: %u\n",
2165 target_thread->reply_error.cmd);
2166 }
2167 binder_inner_proc_unlock(target_thread->proc);
2168 binder_thread_dec_tmpref(target_thread);
2169 binder_free_transaction(t);
2170 return;
2171 }
2172 next = t->from_parent;
2173
2174 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2175 "send failed reply for transaction %d, target dead\n",
2176 t->debug_id);
2177
2178 binder_free_transaction(t);
2179 if (next == NULL) {
2180 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2181 "reply failed, no target thread at root\n");
2182 return;
2183 }
2184 t = next;
2185 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2186 "reply failed, no target thread -- retry %d\n",
2187 t->debug_id);
2188 }
2189}
2190
2191/**
2192 * binder_cleanup_transaction() - cleans up undelivered transaction
2193 * @t: transaction that needs to be cleaned up
2194 * @reason: reason the transaction wasn't delivered
2195 * @error_code: error to return to caller (if synchronous call)
2196 */
2197static void binder_cleanup_transaction(struct binder_transaction *t,
2198 const char *reason,
2199 uint32_t error_code)
2200{
2201 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2202 binder_send_failed_reply(t, error_code);
2203 } else {
2204 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2205 "undelivered transaction %d, %s\n",
2206 t->debug_id, reason);
2207 binder_free_transaction(t);
2208 }
2209}
2210
2211/**
2212 * binder_validate_object() - checks for a valid metadata object in a buffer.
2213 * @buffer: binder_buffer that we're parsing.
2214 * @offset: offset in the buffer at which to validate an object.
2215 *
2216 * Return: If there's a valid metadata object at @offset in @buffer, the
2217 * size of that object. Otherwise, it returns zero.
2218 */
2219static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2220{
2221 /* Check if we can read a header first */
2222 struct binder_object_header *hdr;
2223 size_t object_size = 0;
2224
2225 if (buffer->data_size < sizeof(*hdr) ||
2226 offset > buffer->data_size - sizeof(*hdr) ||
2227 !IS_ALIGNED(offset, sizeof(u32)))
2228 return 0;
2229
2230 /* Ok, now see if we can read a complete object. */
2231 hdr = (struct binder_object_header *)(buffer->data + offset);
2232 switch (hdr->type) {
2233 case BINDER_TYPE_BINDER:
2234 case BINDER_TYPE_WEAK_BINDER:
2235 case BINDER_TYPE_HANDLE:
2236 case BINDER_TYPE_WEAK_HANDLE:
2237 object_size = sizeof(struct flat_binder_object);
2238 break;
2239 case BINDER_TYPE_FD:
2240 object_size = sizeof(struct binder_fd_object);
2241 break;
2242 case BINDER_TYPE_PTR:
2243 object_size = sizeof(struct binder_buffer_object);
2244 break;
2245 case BINDER_TYPE_FDA:
2246 object_size = sizeof(struct binder_fd_array_object);
2247 break;
2248 default:
2249 return 0;
2250 }
2251 if (offset <= buffer->data_size - object_size &&
2252 buffer->data_size >= object_size)
2253 return object_size;
2254 else
2255 return 0;
2256}
2257
2258/**
2259 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2260 * @b: binder_buffer containing the object
2261 * @index: index in offset array at which the binder_buffer_object is
2262 * located
2263 * @start: points to the start of the offset array
2264 * @num_valid: the number of valid offsets in the offset array
2265 *
2266 * Return: If @index is within the valid range of the offset array
2267 * described by @start and @num_valid, and if there's a valid
2268 * binder_buffer_object at the offset found in index @index
2269 * of the offset array, that object is returned. Otherwise,
2270 * %NULL is returned.
2271 * Note that the offset found in index @index itself is not
2272 * verified; this function assumes that @num_valid elements
2273 * from @start were previously verified to have valid offsets.
2274 */
2275static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2276 binder_size_t index,
2277 binder_size_t *start,
2278 binder_size_t num_valid)
2279{
2280 struct binder_buffer_object *buffer_obj;
2281 binder_size_t *offp;
2282
2283 if (index >= num_valid)
2284 return NULL;
2285
2286 offp = start + index;
2287 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2288 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2289 return NULL;
2290
2291 return buffer_obj;
2292}
2293
2294/**
2295 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2296 * @b: transaction buffer
2297 * @objects_start:	start of objects buffer
2298 * @buffer:		binder_buffer_object in which to fix up
2299 * @fixup_offset:	start offset in @buffer to fix up
2300 * @last_obj: last binder_buffer_object that we fixed up in
2301 * @last_min_offset: minimum fixup offset in @last_obj
2302 *
2303 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
2304 * allowed.
2305 *
2306 * For safety reasons, we only allow fixups inside a buffer to happen
2307 * at increasing offsets; additionally, we only allow fixup on the last
2308 * buffer object that was verified, or one of its parents.
2309 *
2310 * Example of what is allowed:
2311 *
2312 * A
2313 * B (parent = A, offset = 0)
2314 * C (parent = A, offset = 16)
2315 * D (parent = C, offset = 0)
2316 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2317 *
2318 * Examples of what is not allowed:
2319 *
2320 * Decreasing offsets within the same parent:
2321 * A
2322 * C (parent = A, offset = 16)
2323 * B (parent = A, offset = 0) // decreasing offset within A
2324 *
2325 * Referring to a parent that wasn't the last object or any of its parents:
2326 * A
2327 * B (parent = A, offset = 0)
2328 * C (parent = A, offset = 0)
2329 * C (parent = A, offset = 16)
2330 * D (parent = B, offset = 0) // B is not A or any of A's parents
2331 */
2332static bool binder_validate_fixup(struct binder_buffer *b,
2333 binder_size_t *objects_start,
2334 struct binder_buffer_object *buffer,
2335 binder_size_t fixup_offset,
2336 struct binder_buffer_object *last_obj,
2337 binder_size_t last_min_offset)
2338{
2339 if (!last_obj) {
2340 /* Nothing to fix up in */
2341 return false;
2342 }
2343
2344 while (last_obj != buffer) {
2345 /*
2346 * Safe to retrieve the parent of last_obj, since it
2347 * was already previously verified by the driver.
2348 */
2349 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2350 return false;
2351 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2352 last_obj = (struct binder_buffer_object *)
2353 (b->data + *(objects_start + last_obj->parent));
2354 }
2355 return (fixup_offset >= last_min_offset);
2356}
2357
2358static void binder_transaction_buffer_release(struct binder_proc *proc,
2359 struct binder_buffer *buffer,
2360 binder_size_t *failed_at)
2361{
2362 binder_size_t *offp, *off_start, *off_end;
2363 int debug_id = buffer->debug_id;
2364
2365 binder_debug(BINDER_DEBUG_TRANSACTION,
2366 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2367 proc->pid, buffer->debug_id,
2368 buffer->data_size, buffer->offsets_size, failed_at);
2369
2370 if (buffer->target_node)
2371 binder_dec_node(buffer->target_node, 1, 0);
2372
2373 off_start = (binder_size_t *)(buffer->data +
2374 ALIGN(buffer->data_size, sizeof(void *)));
2375 if (failed_at)
2376 off_end = failed_at;
2377 else
2378 off_end = (void *)off_start + buffer->offsets_size;
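	/*
	 * When failed_at is set we are unwinding a partially translated
	 * transaction, so only release the objects that were processed
	 * before the failing offset.
	 */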
2379 for (offp = off_start; offp < off_end; offp++) {
2380 struct binder_object_header *hdr;
2381 size_t object_size = binder_validate_object(buffer, *offp);
2382
2383 if (object_size == 0) {
2384 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2385 debug_id, (u64)*offp, buffer->data_size);
2386 continue;
2387 }
2388 hdr = (struct binder_object_header *)(buffer->data + *offp);
2389 switch (hdr->type) {
2390 case BINDER_TYPE_BINDER:
2391 case BINDER_TYPE_WEAK_BINDER: {
2392 struct flat_binder_object *fp;
2393 struct binder_node *node;
2394
2395 fp = to_flat_binder_object(hdr);
2396 node = binder_get_node(proc, fp->binder);
2397 if (node == NULL) {
2398 pr_err("transaction release %d bad node %016llx\n",
2399 debug_id, (u64)fp->binder);
2400 break;
2401 }
2402 binder_debug(BINDER_DEBUG_TRANSACTION,
2403 " node %d u%016llx\n",
2404 node->debug_id, (u64)node->ptr);
2405 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2406 0);
2407 binder_put_node(node);
2408 } break;
2409 case BINDER_TYPE_HANDLE:
2410 case BINDER_TYPE_WEAK_HANDLE: {
2411 struct flat_binder_object *fp;
2412 struct binder_ref_data rdata;
2413 int ret;
2414
2415 fp = to_flat_binder_object(hdr);
2416 ret = binder_dec_ref_for_handle(proc, fp->handle,
2417 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2418
2419 if (ret) {
2420 pr_err("transaction release %d bad handle %d, ret = %d\n",
2421 debug_id, fp->handle, ret);
2422 break;
2423 }
2424 binder_debug(BINDER_DEBUG_TRANSACTION,
2425 " ref %d desc %d\n",
2426 rdata.debug_id, rdata.desc);
2427 } break;
2428
2429 case BINDER_TYPE_FD: {
2430 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2431
2432 binder_debug(BINDER_DEBUG_TRANSACTION,
2433 " fd %d\n", fp->fd);
2434 if (failed_at)
2435 task_close_fd(proc, fp->fd);
2436 } break;
2437 case BINDER_TYPE_PTR:
2438 /*
2439 * Nothing to do here, this will get cleaned up when the
2440 * transaction buffer gets freed
2441 */
2442 break;
2443 case BINDER_TYPE_FDA: {
2444 struct binder_fd_array_object *fda;
2445 struct binder_buffer_object *parent;
2446 uintptr_t parent_buffer;
2447 u32 *fd_array;
2448 size_t fd_index;
2449 binder_size_t fd_buf_size;
2450
2451 fda = to_binder_fd_array_object(hdr);
2452 parent = binder_validate_ptr(buffer, fda->parent,
2453 off_start,
2454 offp - off_start);
2455 if (!parent) {
2456 pr_err("transaction release %d bad parent offset",
2457 debug_id);
2458 continue;
2459 }
2460 /*
2461 * Since the parent was already fixed up, convert it
2462 * back to kernel address space to access it
2463 */
2464 parent_buffer = parent->buffer -
2465 binder_alloc_get_user_buffer_offset(
2466 &proc->alloc);
2467
2468 fd_buf_size = sizeof(u32) * fda->num_fds;
2469 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2470 pr_err("transaction release %d invalid number of fds (%lld)\n",
2471 debug_id, (u64)fda->num_fds);
2472 continue;
2473 }
2474 if (fd_buf_size > parent->length ||
2475 fda->parent_offset > parent->length - fd_buf_size) {
2476 /* No space for all file descriptors here. */
2477 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2478 debug_id, (u64)fda->num_fds);
2479 continue;
2480 }
2481 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2482 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2483 task_close_fd(proc, fd_array[fd_index]);
2484 } break;
2485 default:
2486 pr_err("transaction release %d bad object type %x\n",
2487 debug_id, hdr->type);
2488 break;
2489 }
2490 }
2491}
2492
2493static int binder_translate_binder(struct flat_binder_object *fp,
2494 struct binder_transaction *t,
2495 struct binder_thread *thread)
2496{
2497 struct binder_node *node;
2498 struct binder_proc *proc = thread->proc;
2499 struct binder_proc *target_proc = t->to_proc;
2500 struct binder_ref_data rdata;
2501 int ret = 0;
2502
2503 node = binder_get_node(proc, fp->binder);
2504 if (!node) {
2505 node = binder_new_node(proc, fp);
2506 if (!node)
2507 return -ENOMEM;
2508 }
2509 if (fp->cookie != node->cookie) {
2510 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2511 proc->pid, thread->pid, (u64)fp->binder,
2512 node->debug_id, (u64)fp->cookie,
2513 (u64)node->cookie);
2514 ret = -EINVAL;
2515 goto done;
2516 }
2517 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2518 ret = -EPERM;
2519 goto done;
2520 }
2521
2522 ret = binder_inc_ref_for_node(target_proc, node,
2523 fp->hdr.type == BINDER_TYPE_BINDER,
2524 &thread->todo, &rdata);
2525 if (ret)
2526 goto done;
2527
2528 if (fp->hdr.type == BINDER_TYPE_BINDER)
2529 fp->hdr.type = BINDER_TYPE_HANDLE;
2530 else
2531 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2532 fp->binder = 0;
2533 fp->handle = rdata.desc;
2534 fp->cookie = 0;
2535
2536 trace_binder_transaction_node_to_ref(t, node, &rdata);
2537 binder_debug(BINDER_DEBUG_TRANSACTION,
2538 " node %d u%016llx -> ref %d desc %d\n",
2539 node->debug_id, (u64)node->ptr,
2540 rdata.debug_id, rdata.desc);
2541done:
2542 binder_put_node(node);
2543 return ret;
2544}
2545
2546static int binder_translate_handle(struct flat_binder_object *fp,
2547 struct binder_transaction *t,
2548 struct binder_thread *thread)
2549{
2550 struct binder_proc *proc = thread->proc;
2551 struct binder_proc *target_proc = t->to_proc;
2552 struct binder_node *node;
2553 struct binder_ref_data src_rdata;
2554 int ret = 0;
2555
2556 node = binder_get_node_from_ref(proc, fp->handle,
2557 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2558 if (!node) {
2559 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2560 proc->pid, thread->pid, fp->handle);
2561 return -EINVAL;
2562 }
2563 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2564 ret = -EPERM;
2565 goto done;
2566 }
2567
2568 binder_node_lock(node);
2569 if (node->proc == target_proc) {
2570 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2571 fp->hdr.type = BINDER_TYPE_BINDER;
2572 else
2573 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2574 fp->binder = node->ptr;
2575 fp->cookie = node->cookie;
2576 if (node->proc)
2577 binder_inner_proc_lock(node->proc);
2578 binder_inc_node_nilocked(node,
2579 fp->hdr.type == BINDER_TYPE_BINDER,
2580 0, NULL);
2581 if (node->proc)
2582 binder_inner_proc_unlock(node->proc);
2583 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2584 binder_debug(BINDER_DEBUG_TRANSACTION,
2585 " ref %d desc %d -> node %d u%016llx\n",
2586 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2587 (u64)node->ptr);
2588 binder_node_unlock(node);
2589 } else {
2590 struct binder_ref_data dest_rdata;
2591
2592 binder_node_unlock(node);
2593 ret = binder_inc_ref_for_node(target_proc, node,
2594 fp->hdr.type == BINDER_TYPE_HANDLE,
2595 NULL, &dest_rdata);
2596 if (ret)
2597 goto done;
2598
2599 fp->binder = 0;
2600 fp->handle = dest_rdata.desc;
2601 fp->cookie = 0;
2602 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2603 &dest_rdata);
2604 binder_debug(BINDER_DEBUG_TRANSACTION,
2605 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2606 src_rdata.debug_id, src_rdata.desc,
2607 dest_rdata.debug_id, dest_rdata.desc,
2608 node->debug_id);
2609 }
2610done:
2611 binder_put_node(node);
2612 return ret;
2613}
2614
2615static int binder_translate_fd(int fd,
2616 struct binder_transaction *t,
2617 struct binder_thread *thread,
2618 struct binder_transaction *in_reply_to)
2619{
2620 struct binder_proc *proc = thread->proc;
2621 struct binder_proc *target_proc = t->to_proc;
2622 int target_fd;
2623 struct file *file;
2624 int ret;
2625 bool target_allows_fd;
2626
2627 if (in_reply_to)
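	/*
	 * Whether fds may be passed is decided by the original transaction's
	 * TF_ACCEPT_FDS flag for replies, and by the target node's accept_fds
	 * setting otherwise.
	 */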
2628 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2629 else
2630 target_allows_fd = t->buffer->target_node->accept_fds;
2631 if (!target_allows_fd) {
2632 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2633 proc->pid, thread->pid,
2634 in_reply_to ? "reply" : "transaction",
2635 fd);
2636 ret = -EPERM;
2637 goto err_fd_not_accepted;
2638 }
2639
2640 file = fget(fd);
2641 if (!file) {
2642 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2643 proc->pid, thread->pid, fd);
2644 ret = -EBADF;
2645 goto err_fget;
2646 }
2647 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2648 if (ret < 0) {
2649 ret = -EPERM;
2650 goto err_security;
2651 }
2652
2653 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2654 if (target_fd < 0) {
2655 ret = -ENOMEM;
2656 goto err_get_unused_fd;
2657 }
2658 task_fd_install(target_proc, target_fd, file);
2659 trace_binder_transaction_fd(t, fd, target_fd);
2660 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2661 fd, target_fd);
2662
2663 return target_fd;
2664
2665err_get_unused_fd:
2666err_security:
2667 fput(file);
2668err_fget:
2669err_fd_not_accepted:
2670 return ret;
2671}
2672
2673static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2674 struct binder_buffer_object *parent,
2675 struct binder_transaction *t,
2676 struct binder_thread *thread,
2677 struct binder_transaction *in_reply_to)
2678{
2679 binder_size_t fdi, fd_buf_size, num_installed_fds;
2680 int target_fd;
2681 uintptr_t parent_buffer;
2682 u32 *fd_array;
2683 struct binder_proc *proc = thread->proc;
2684 struct binder_proc *target_proc = t->to_proc;
2685
2686 fd_buf_size = sizeof(u32) * fda->num_fds;
2687 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2688 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2689 proc->pid, thread->pid, (u64)fda->num_fds);
2690 return -EINVAL;
2691 }
2692 if (fd_buf_size > parent->length ||
2693 fda->parent_offset > parent->length - fd_buf_size) {
2694 /* No space for all file descriptors here. */
2695 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2696 proc->pid, thread->pid, (u64)fda->num_fds);
2697 return -EINVAL;
2698 }
2699 /*
2700 * Since the parent was already fixed up, convert it
2701 * back to the kernel address space to access it
2702 */
2703 parent_buffer = parent->buffer -
2704 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2705 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2706 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2707 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2708 proc->pid, thread->pid);
2709 return -EINVAL;
2710 }
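	/*
	 * fd_array points at the kernel mapping of the target's copy of the
	 * parent buffer; each fd is translated and written back in place.
	 */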
2711 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2712 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2713 in_reply_to);
2714 if (target_fd < 0)
2715 goto err_translate_fd_failed;
2716 fd_array[fdi] = target_fd;
2717 }
2718 return 0;
2719
2720err_translate_fd_failed:
2721 /*
2722 * Failed to allocate fd or security error, free fds
2723 * installed so far.
2724 */
2725 num_installed_fds = fdi;
2726 for (fdi = 0; fdi < num_installed_fds; fdi++)
2727 task_close_fd(target_proc, fd_array[fdi]);
2728 return target_fd;
2729}
2730
2731static int binder_fixup_parent(struct binder_transaction *t,
2732 struct binder_thread *thread,
2733 struct binder_buffer_object *bp,
2734 binder_size_t *off_start,
2735 binder_size_t num_valid,
2736 struct binder_buffer_object *last_fixup_obj,
2737 binder_size_t last_fixup_min_off)
2738{
2739 struct binder_buffer_object *parent;
2740 u8 *parent_buffer;
2741 struct binder_buffer *b = t->buffer;
2742 struct binder_proc *proc = thread->proc;
2743 struct binder_proc *target_proc = t->to_proc;
2744
2745 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2746 return 0;
2747
2748 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2749 if (!parent) {
2750 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2751 proc->pid, thread->pid);
2752 return -EINVAL;
2753 }
2754
2755 if (!binder_validate_fixup(b, off_start,
2756 parent, bp->parent_offset,
2757 last_fixup_obj,
2758 last_fixup_min_off)) {
2759 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2760 proc->pid, thread->pid);
2761 return -EINVAL;
2762 }
2763
2764 if (parent->length < sizeof(binder_uintptr_t) ||
2765 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2766 /* No space for a pointer here! */
2767 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2768 proc->pid, thread->pid);
2769 return -EINVAL;
2770 }
2771 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2772 binder_alloc_get_user_buffer_offset(
2773 &target_proc->alloc));
2774 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2775
2776 return 0;
2777}
2778
2779/**
2780 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2781 * @t: transaction to send
2782 * @proc: process to send the transaction to
2783 * @thread: thread in @proc to send the transaction to (may be NULL)
2784 *
2785 * This function queues a transaction to the specified process. It will try
2786 * to find a thread in the target process to handle the transaction and
2787 * wake it up. If no idle thread is found, the work is queued on proc->todo
2788 * (or on node->async_todo for a oneway transaction behind a pending one).
2789 *
2790 * If the @thread parameter is not NULL, the transaction is always queued
2791 * to the todo list of that specific thread.
2792 *
2793 * Return: true if the transaction was successfully queued
2794 * false if the target process or thread is dead
2795 */
2796static bool binder_proc_transaction(struct binder_transaction *t,
2797 struct binder_proc *proc,
2798 struct binder_thread *thread)
2799{
2800 struct binder_node *node = t->buffer->target_node;
2801 struct binder_priority node_prio;
2802 bool oneway = !!(t->flags & TF_ONE_WAY);
2803 bool pending_async = false;
2804
2805 BUG_ON(!node);
2806 binder_node_lock(node);
2807 node_prio.prio = node->min_priority;
2808 node_prio.sched_policy = node->sched_policy;
2809
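	/*
	 * Oneway transactions to a node are serialized: if one is already
	 * pending on the node, this one is parked on node->async_todo instead
	 * of being handed to a thread below.
	 */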
2810 if (oneway) {
2811 BUG_ON(thread);
2812 if (node->has_async_transaction) {
2813 pending_async = true;
2814 } else {
2815 node->has_async_transaction = true;
2816 }
2817 }
2818
2819 binder_inner_proc_lock(proc);
2820
2821 if (proc->is_dead || (thread && thread->is_dead)) {
2822 binder_inner_proc_unlock(proc);
2823 binder_node_unlock(node);
2824 return false;
2825 }
2826
2827 if (!thread && !pending_async)
2828 thread = binder_select_thread_ilocked(proc);
2829
2830 if (thread) {
2831 binder_transaction_priority(thread->task, t, node_prio,
2832 node->inherit_rt);
2833 binder_enqueue_thread_work_ilocked(thread, &t->work);
2834 } else if (!pending_async) {
2835 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2836 } else {
2837 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2838 }
2839
2840 if (!pending_async)
2841 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2842
2843 binder_inner_proc_unlock(proc);
2844 binder_node_unlock(node);
2845
2846 return true;
2847}
2848
2849/**
2850 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2851 * @node: struct binder_node for which to get refs
2852 * @procp:	returns @node->proc if valid
2853 * @error:	set to BR_DEAD_REPLY if @node->proc is NULL
2854 *
2855 * User-space normally keeps the node alive when creating a transaction
2856 * since it has a reference to the target. The local strong ref keeps it
2857 * alive if the sending process dies before the target process processes
2858 * the transaction. If the source process is malicious or has a reference
2859 * counting bug, relying on the local strong ref can fail.
2860 *
2861 * Since user-space can cause the local strong ref to go away, we also take
2862 * a tmpref on the node to ensure it survives while we are constructing
2863 * the transaction. We also need a tmpref on the proc while we are
2864 * constructing the transaction, so we take that here as well.
2865 *
2866 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2867 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2868 * target proc has died, @error is set to BR_DEAD_REPLY.
2869 */
2870static struct binder_node *binder_get_node_refs_for_txn(
2871 struct binder_node *node,
2872 struct binder_proc **procp,
2873 uint32_t *error)
2874{
2875 struct binder_node *target_node = NULL;
2876
2877 binder_node_inner_lock(node);
2878 if (node->proc) {
2879 target_node = node;
2880 binder_inc_node_nilocked(node, 1, 0, NULL);
2881 binder_inc_node_tmpref_ilocked(node);
2882 node->proc->tmp_ref++;
2883 *procp = node->proc;
2884 } else
2885 *error = BR_DEAD_REPLY;
2886 binder_node_inner_unlock(node);
2887
2888 return target_node;
2889}
2890
2891static void binder_transaction(struct binder_proc *proc,
2892 struct binder_thread *thread,
2893 struct binder_transaction_data *tr, int reply,
2894 binder_size_t extra_buffers_size)
2895{
2896 int ret;
2897 struct binder_transaction *t;
2898 struct binder_work *tcomplete;
2899 binder_size_t *offp, *off_end, *off_start;
2900 binder_size_t off_min;
2901 u8 *sg_bufp, *sg_buf_end;
2902 struct binder_proc *target_proc = NULL;
2903 struct binder_thread *target_thread = NULL;
2904 struct binder_node *target_node = NULL;
2905 struct binder_transaction *in_reply_to = NULL;
2906 struct binder_transaction_log_entry *e;
2907 uint32_t return_error = 0;
2908 uint32_t return_error_param = 0;
2909 uint32_t return_error_line = 0;
2910 struct binder_buffer_object *last_fixup_obj = NULL;
2911 binder_size_t last_fixup_min_off = 0;
2912 struct binder_context *context = proc->context;
2913 int t_debug_id = atomic_inc_return(&binder_last_id);
2914 char *secctx = NULL;
2915 u32 secctx_sz = 0;
2916
2917 e = binder_transaction_log_add(&binder_transaction_log);
2918 e->debug_id = t_debug_id;
2919 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2920 e->from_proc = proc->pid;
2921 e->from_thread = thread->pid;
2922 e->target_handle = tr->target.handle;
2923 e->data_size = tr->data_size;
2924 e->offsets_size = tr->offsets_size;
2925 e->context_name = proc->context->name;
2926
2927 if (reply) {
2928 binder_inner_proc_lock(proc);
2929 in_reply_to = thread->transaction_stack;
2930 if (in_reply_to == NULL) {
2931 binder_inner_proc_unlock(proc);
2932 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2933 proc->pid, thread->pid);
2934 return_error = BR_FAILED_REPLY;
2935 return_error_param = -EPROTO;
2936 return_error_line = __LINE__;
2937 goto err_empty_call_stack;
2938 }
2939 if (in_reply_to->to_thread != thread) {
2940 spin_lock(&in_reply_to->lock);
2941 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2942 proc->pid, thread->pid, in_reply_to->debug_id,
2943 in_reply_to->to_proc ?
2944 in_reply_to->to_proc->pid : 0,
2945 in_reply_to->to_thread ?
2946 in_reply_to->to_thread->pid : 0);
2947 spin_unlock(&in_reply_to->lock);
2948 binder_inner_proc_unlock(proc);
2949 return_error = BR_FAILED_REPLY;
2950 return_error_param = -EPROTO;
2951 return_error_line = __LINE__;
2952 in_reply_to = NULL;
2953 goto err_bad_call_stack;
2954 }
2955 thread->transaction_stack = in_reply_to->to_parent;
2956 binder_inner_proc_unlock(proc);
2957 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2958 if (target_thread == NULL) {
2959 return_error = BR_DEAD_REPLY;
2960 return_error_line = __LINE__;
2961 goto err_dead_binder;
2962 }
2963 if (target_thread->transaction_stack != in_reply_to) {
2964 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2965 proc->pid, thread->pid,
2966 target_thread->transaction_stack ?
2967 target_thread->transaction_stack->debug_id : 0,
2968 in_reply_to->debug_id);
2969 binder_inner_proc_unlock(target_thread->proc);
2970 return_error = BR_FAILED_REPLY;
2971 return_error_param = -EPROTO;
2972 return_error_line = __LINE__;
2973 in_reply_to = NULL;
2974 target_thread = NULL;
2975 goto err_dead_binder;
2976 }
2977 target_proc = target_thread->proc;
2978 target_proc->tmp_ref++;
2979 binder_inner_proc_unlock(target_thread->proc);
2980 } else {
2981 if (tr->target.handle) {
2982 struct binder_ref *ref;
2983
2984 /*
2985			 * The sender must already hold a strong ref
2986			 * on this node, so do a strong
2987 * increment on the node to ensure it
2988 * stays alive until the transaction is
2989 * done.
2990 */
2991 binder_proc_lock(proc);
2992 ref = binder_get_ref_olocked(proc, tr->target.handle,
2993 true);
2994 if (ref) {
2995 target_node = binder_get_node_refs_for_txn(
2996 ref->node, &target_proc,
2997 &return_error);
2998 } else {
2999 binder_user_error("%d:%d got transaction to invalid handle\n",
3000 proc->pid, thread->pid);
3001 return_error = BR_FAILED_REPLY;
3002 }
3003 binder_proc_unlock(proc);
3004 } else {
3005 mutex_lock(&context->context_mgr_node_lock);
3006 target_node = context->binder_context_mgr_node;
3007 if (target_node)
3008 target_node = binder_get_node_refs_for_txn(
3009 target_node, &target_proc,
3010 &return_error);
3011 else
3012 return_error = BR_DEAD_REPLY;
3013 mutex_unlock(&context->context_mgr_node_lock);
3014 if (target_node && target_proc->pid == proc->pid) {
3015 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3016 proc->pid, thread->pid);
3017 return_error = BR_FAILED_REPLY;
3018 return_error_param = -EINVAL;
3019 return_error_line = __LINE__;
3020 goto err_invalid_target_handle;
3021 }
3022 }
3023 if (!target_node) {
3024 /*
3025 * return_error is set above
3026 */
3027 return_error_param = -EINVAL;
3028 return_error_line = __LINE__;
3029 goto err_dead_binder;
3030 }
3031 e->to_node = target_node->debug_id;
3032 if (WARN_ON(proc == target_proc)) {
3033 return_error = BR_FAILED_REPLY;
3034 return_error_param = -EINVAL;
3035 return_error_line = __LINE__;
3036 goto err_invalid_target_handle;
3037 }
3038 if (security_binder_transaction(proc->tsk,
3039 target_proc->tsk) < 0) {
3040 return_error = BR_FAILED_REPLY;
3041 return_error_param = -EPERM;
3042 return_error_line = __LINE__;
3043 goto err_invalid_target_handle;
3044 }
3045 binder_inner_proc_lock(proc);
3046 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3047 struct binder_transaction *tmp;
3048
3049 tmp = thread->transaction_stack;
3050 if (tmp->to_thread != thread) {
3051 spin_lock(&tmp->lock);
3052 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3053 proc->pid, thread->pid, tmp->debug_id,
3054 tmp->to_proc ? tmp->to_proc->pid : 0,
3055 tmp->to_thread ?
3056 tmp->to_thread->pid : 0);
3057 spin_unlock(&tmp->lock);
3058 binder_inner_proc_unlock(proc);
3059 return_error = BR_FAILED_REPLY;
3060 return_error_param = -EPROTO;
3061 return_error_line = __LINE__;
3062 goto err_bad_call_stack;
3063 }
3064 while (tmp) {
3065 struct binder_thread *from;
3066
3067 spin_lock(&tmp->lock);
3068 from = tmp->from;
3069 if (from && from->proc == target_proc) {
3070 atomic_inc(&from->tmp_ref);
3071 target_thread = from;
3072 spin_unlock(&tmp->lock);
3073 break;
3074 }
3075 spin_unlock(&tmp->lock);
3076 tmp = tmp->from_parent;
3077 }
3078 }
3079 binder_inner_proc_unlock(proc);
3080 }
3081 if (target_thread)
3082 e->to_thread = target_thread->pid;
3083 e->to_proc = target_proc->pid;
3084
3085 /* TODO: reuse incoming transaction for reply */
3086 t = kzalloc(sizeof(*t), GFP_KERNEL);
3087 if (t == NULL) {
3088 return_error = BR_FAILED_REPLY;
3089 return_error_param = -ENOMEM;
3090 return_error_line = __LINE__;
3091 goto err_alloc_t_failed;
3092 }
3093 binder_stats_created(BINDER_STAT_TRANSACTION);
3094 spin_lock_init(&t->lock);
3095
3096 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3097 if (tcomplete == NULL) {
3098 return_error = BR_FAILED_REPLY;
3099 return_error_param = -ENOMEM;
3100 return_error_line = __LINE__;
3101 goto err_alloc_tcomplete_failed;
3102 }
3103 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3104
3105 t->debug_id = t_debug_id;
3106
3107 if (reply)
3108 binder_debug(BINDER_DEBUG_TRANSACTION,
3109 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3110 proc->pid, thread->pid, t->debug_id,
3111 target_proc->pid, target_thread->pid,
3112 (u64)tr->data.ptr.buffer,
3113 (u64)tr->data.ptr.offsets,
3114 (u64)tr->data_size, (u64)tr->offsets_size,
3115 (u64)extra_buffers_size);
3116 else
3117 binder_debug(BINDER_DEBUG_TRANSACTION,
3118 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3119 proc->pid, thread->pid, t->debug_id,
3120 target_proc->pid, target_node->debug_id,
3121 (u64)tr->data.ptr.buffer,
3122 (u64)tr->data.ptr.offsets,
3123 (u64)tr->data_size, (u64)tr->offsets_size,
3124 (u64)extra_buffers_size);
3125
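	/*
	 * t->from is only needed so the eventual reply can be routed back to
	 * the sender, so oneway transactions and replies leave it NULL.
	 */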
3126 if (!reply && !(tr->flags & TF_ONE_WAY))
3127 t->from = thread;
3128 else
3129 t->from = NULL;
3130 t->sender_euid = task_euid(proc->tsk);
3131 t->to_proc = target_proc;
3132 t->to_thread = target_thread;
3133 t->code = tr->code;
3134 t->flags = tr->flags;
3135 if (!(t->flags & TF_ONE_WAY) &&
3136 binder_supported_policy(current->policy)) {
3137 /* Inherit supported policies for synchronous transactions */
3138 t->priority.sched_policy = current->policy;
3139 t->priority.prio = current->normal_prio;
3140 } else {
3141 /* Otherwise, fall back to the default priority */
3142 t->priority = target_proc->default_priority;
3143 }
3144
3145 if (target_node && target_node->txn_security_ctx) {
3146 u32 secid;
3147 size_t added_size;
3148
3149 security_task_getsecid(proc->tsk, &secid);
3150 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3151 if (ret) {
3152 return_error = BR_FAILED_REPLY;
3153 return_error_param = ret;
3154 return_error_line = __LINE__;
3155 goto err_get_secctx_failed;
3156 }
3157 added_size = ALIGN(secctx_sz, sizeof(u64));
3158 extra_buffers_size += added_size;
3159 if (extra_buffers_size < added_size) {
3160 /* integer overflow of extra_buffers_size */
3161 return_error = BR_FAILED_REPLY;
3162			return_error_param = -EINVAL;
3163 return_error_line = __LINE__;
3164 goto err_bad_extra_size;
3165 }
3166 }
3167
3168 trace_binder_transaction(reply, t, target_node);
3169
3170 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3171 tr->offsets_size, extra_buffers_size,
3172 !reply && (t->flags & TF_ONE_WAY));
3173 if (IS_ERR(t->buffer)) {
3174 /*
3175 * -ESRCH indicates VMA cleared. The target is dying.
3176 */
3177 return_error_param = PTR_ERR(t->buffer);
3178 return_error = return_error_param == -ESRCH ?
3179 BR_DEAD_REPLY : BR_FAILED_REPLY;
3180 return_error_line = __LINE__;
3181 t->buffer = NULL;
3182 goto err_binder_alloc_buf_failed;
3183 }
3184 if (secctx) {
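		/*
		 * The security context is copied into the tail of the extra
		 * buffers area, which was grown by ALIGN(secctx_sz, sizeof(u64))
		 * above; buf_offset points at that slot.
		 */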
3185 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3186 ALIGN(tr->offsets_size, sizeof(void *)) +
3187 ALIGN(extra_buffers_size, sizeof(void *)) -
3188 ALIGN(secctx_sz, sizeof(u64));
3189 char *kptr = t->buffer->data + buf_offset;
3190
3191 t->security_ctx = (uintptr_t)kptr +
3192 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
3193 memcpy(kptr, secctx, secctx_sz);
3194 security_release_secctx(secctx, secctx_sz);
3195 secctx = NULL;
3196 }
3197 t->buffer->debug_id = t->debug_id;
3198 t->buffer->transaction = t;
3199 t->buffer->target_node = target_node;
3200 trace_binder_transaction_alloc_buf(t->buffer);
3201 off_start = (binder_size_t *)(t->buffer->data +
3202 ALIGN(tr->data_size, sizeof(void *)));
3203 offp = off_start;
3204
3205 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3206 tr->data.ptr.buffer, tr->data_size)) {
3207 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3208 proc->pid, thread->pid);
3209 return_error = BR_FAILED_REPLY;
3210 return_error_param = -EFAULT;
3211 return_error_line = __LINE__;
3212 goto err_copy_data_failed;
3213 }
3214 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3215 tr->data.ptr.offsets, tr->offsets_size)) {
3216 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3217 proc->pid, thread->pid);
3218 return_error = BR_FAILED_REPLY;
3219 return_error_param = -EFAULT;
3220 return_error_line = __LINE__;
3221 goto err_copy_data_failed;
3222 }
3223 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3224 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3225 proc->pid, thread->pid, (u64)tr->offsets_size);
3226 return_error = BR_FAILED_REPLY;
3227 return_error_param = -EINVAL;
3228 return_error_line = __LINE__;
3229 goto err_bad_offset;
3230 }
3231 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3232 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3233 proc->pid, thread->pid,
3234 (u64)extra_buffers_size);
3235 return_error = BR_FAILED_REPLY;
3236 return_error_param = -EINVAL;
3237 return_error_line = __LINE__;
3238 goto err_bad_offset;
3239 }
3240 off_end = (void *)off_start + tr->offsets_size;
3241 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3242 sg_buf_end = sg_bufp + extra_buffers_size -
3243 ALIGN(secctx_sz, sizeof(u64));
3244 off_min = 0;
3245 for (; offp < off_end; offp++) {
3246 struct binder_object_header *hdr;
3247 size_t object_size = binder_validate_object(t->buffer, *offp);
3248
3249 if (object_size == 0 || *offp < off_min) {
3250 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3251 proc->pid, thread->pid, (u64)*offp,
3252 (u64)off_min,
3253 (u64)t->buffer->data_size);
3254 return_error = BR_FAILED_REPLY;
3255 return_error_param = -EINVAL;
3256 return_error_line = __LINE__;
3257 goto err_bad_offset;
3258 }
3259
3260 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3261 off_min = *offp + object_size;
3262 switch (hdr->type) {
3263 case BINDER_TYPE_BINDER:
3264 case BINDER_TYPE_WEAK_BINDER: {
3265 struct flat_binder_object *fp;
3266
3267 fp = to_flat_binder_object(hdr);
3268 ret = binder_translate_binder(fp, t, thread);
3269 if (ret < 0) {
3270 return_error = BR_FAILED_REPLY;
3271 return_error_param = ret;
3272 return_error_line = __LINE__;
3273 goto err_translate_failed;
3274 }
3275 } break;
3276 case BINDER_TYPE_HANDLE:
3277 case BINDER_TYPE_WEAK_HANDLE: {
3278 struct flat_binder_object *fp;
3279
3280 fp = to_flat_binder_object(hdr);
3281 ret = binder_translate_handle(fp, t, thread);
3282 if (ret < 0) {
3283 return_error = BR_FAILED_REPLY;
3284 return_error_param = ret;
3285 return_error_line = __LINE__;
3286 goto err_translate_failed;
3287 }
3288 } break;
3289
3290 case BINDER_TYPE_FD: {
3291 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3292 int target_fd = binder_translate_fd(fp->fd, t, thread,
3293 in_reply_to);
3294
3295 if (target_fd < 0) {
3296 return_error = BR_FAILED_REPLY;
3297 return_error_param = target_fd;
3298 return_error_line = __LINE__;
3299 goto err_translate_failed;
3300 }
3301 fp->pad_binder = 0;
3302 fp->fd = target_fd;
3303 } break;
3304 case BINDER_TYPE_FDA: {
3305 struct binder_fd_array_object *fda =
3306 to_binder_fd_array_object(hdr);
3307 struct binder_buffer_object *parent =
3308 binder_validate_ptr(t->buffer, fda->parent,
3309 off_start,
3310 offp - off_start);
3311 if (!parent) {
3312 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3313 proc->pid, thread->pid);
3314 return_error = BR_FAILED_REPLY;
3315 return_error_param = -EINVAL;
3316 return_error_line = __LINE__;
3317 goto err_bad_parent;
3318 }
3319 if (!binder_validate_fixup(t->buffer, off_start,
3320 parent, fda->parent_offset,
3321 last_fixup_obj,
3322 last_fixup_min_off)) {
3323 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3324 proc->pid, thread->pid);
3325 return_error = BR_FAILED_REPLY;
3326 return_error_param = -EINVAL;
3327 return_error_line = __LINE__;
3328 goto err_bad_parent;
3329 }
3330 ret = binder_translate_fd_array(fda, parent, t, thread,
3331 in_reply_to);
3332 if (ret < 0) {
3333 return_error = BR_FAILED_REPLY;
3334 return_error_param = ret;
3335 return_error_line = __LINE__;
3336 goto err_translate_failed;
3337 }
3338 last_fixup_obj = parent;
3339 last_fixup_min_off =
3340 fda->parent_offset + sizeof(u32) * fda->num_fds;
3341 } break;
3342 case BINDER_TYPE_PTR: {
3343 struct binder_buffer_object *bp =
3344 to_binder_buffer_object(hdr);
3345 size_t buf_left = sg_buf_end - sg_bufp;
3346
3347 if (bp->length > buf_left) {
3348 binder_user_error("%d:%d got transaction with too large buffer\n",
3349 proc->pid, thread->pid);
3350 return_error = BR_FAILED_REPLY;
3351 return_error_param = -EINVAL;
3352 return_error_line = __LINE__;
3353 goto err_bad_offset;
3354 }
3355 if (copy_from_user(sg_bufp,
3356 (const void __user *)(uintptr_t)
3357 bp->buffer, bp->length)) {
3358 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3359 proc->pid, thread->pid);
3360 return_error_param = -EFAULT;
3361 return_error = BR_FAILED_REPLY;
3362 return_error_line = __LINE__;
3363 goto err_copy_data_failed;
3364 }
3365 /* Fixup buffer pointer to target proc address space */
3366 bp->buffer = (uintptr_t)sg_bufp +
3367 binder_alloc_get_user_buffer_offset(
3368 &target_proc->alloc);
3369 sg_bufp += ALIGN(bp->length, sizeof(u64));
3370
3371 ret = binder_fixup_parent(t, thread, bp, off_start,
3372 offp - off_start,
3373 last_fixup_obj,
3374 last_fixup_min_off);
3375 if (ret < 0) {
3376 return_error = BR_FAILED_REPLY;
3377 return_error_param = ret;
3378 return_error_line = __LINE__;
3379 goto err_translate_failed;
3380 }
3381 last_fixup_obj = bp;
3382 last_fixup_min_off = 0;
3383 } break;
3384 default:
3385 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3386 proc->pid, thread->pid, hdr->type);
3387 return_error = BR_FAILED_REPLY;
3388 return_error_param = -EINVAL;
3389 return_error_line = __LINE__;
3390 goto err_bad_object_type;
3391 }
3392 }
3393 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3394 t->work.type = BINDER_WORK_TRANSACTION;
3395
3396 if (reply) {
3397 binder_enqueue_thread_work(thread, tcomplete);
3398 binder_inner_proc_lock(target_proc);
3399 if (target_thread->is_dead) {
3400 binder_inner_proc_unlock(target_proc);
3401 goto err_dead_proc_or_thread;
3402 }
3403 BUG_ON(t->buffer->async_transaction != 0);
3404 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3405 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3406 binder_inner_proc_unlock(target_proc);
3407 wake_up_interruptible_sync(&target_thread->wait);
3408 binder_restore_priority(current, in_reply_to->saved_priority);
3409 binder_free_transaction(in_reply_to);
3410 } else if (!(t->flags & TF_ONE_WAY)) {
3411 BUG_ON(t->buffer->async_transaction != 0);
3412 binder_inner_proc_lock(proc);
3413 /*
3414 * Defer the TRANSACTION_COMPLETE, so we don't return to
3415 * userspace immediately; this allows the target process to
3416 * immediately start processing this transaction, reducing
3417 * latency. We will then return the TRANSACTION_COMPLETE when
3418 * the target replies (or there is an error).
3419 */
3420 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3421 t->need_reply = 1;
3422 t->from_parent = thread->transaction_stack;
3423 thread->transaction_stack = t;
3424 binder_inner_proc_unlock(proc);
3425 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3426 binder_inner_proc_lock(proc);
3427 binder_pop_transaction_ilocked(thread, t);
3428 binder_inner_proc_unlock(proc);
3429 goto err_dead_proc_or_thread;
3430 }
3431 } else {
3432 BUG_ON(target_node == NULL);
3433 BUG_ON(t->buffer->async_transaction != 1);
3434 binder_enqueue_thread_work(thread, tcomplete);
3435 if (!binder_proc_transaction(t, target_proc, NULL))
3436 goto err_dead_proc_or_thread;
3437 }
3438 if (target_thread)
3439 binder_thread_dec_tmpref(target_thread);
3440 binder_proc_dec_tmpref(target_proc);
3441 if (target_node)
3442 binder_dec_node_tmpref(target_node);
3443 /*
3444 * write barrier to synchronize with initialization
3445 * of log entry
3446 */
3447 smp_wmb();
3448 WRITE_ONCE(e->debug_id_done, t_debug_id);
3449 return;
3450
3451err_dead_proc_or_thread:
3452 return_error = BR_DEAD_REPLY;
3453 return_error_line = __LINE__;
3454 binder_dequeue_work(proc, tcomplete);
3455err_translate_failed:
3456err_bad_object_type:
3457err_bad_offset:
3458err_bad_parent:
3459err_copy_data_failed:
3460 trace_binder_transaction_failed_buffer_release(t->buffer);
3461 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3462 if (target_node)
3463 binder_dec_node_tmpref(target_node);
3464 target_node = NULL;
3465 t->buffer->transaction = NULL;
3466 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3467err_binder_alloc_buf_failed:
3468err_bad_extra_size:
3469 if (secctx)
3470 security_release_secctx(secctx, secctx_sz);
3471err_get_secctx_failed:
3472 kfree(tcomplete);
3473 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3474err_alloc_tcomplete_failed:
3475 kfree(t);
3476 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3477err_alloc_t_failed:
3478err_bad_call_stack:
3479err_empty_call_stack:
3480err_dead_binder:
3481err_invalid_target_handle:
3482 if (target_thread)
3483 binder_thread_dec_tmpref(target_thread);
3484 if (target_proc)
3485 binder_proc_dec_tmpref(target_proc);
3486 if (target_node) {
3487 binder_dec_node(target_node, 1, 0);
3488 binder_dec_node_tmpref(target_node);
3489 }
3490
3491 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3492 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3493 proc->pid, thread->pid, return_error, return_error_param,
3494 (u64)tr->data_size, (u64)tr->offsets_size,
3495 return_error_line);
3496
3497 {
3498 struct binder_transaction_log_entry *fe;
3499
3500 e->return_error = return_error;
3501 e->return_error_param = return_error_param;
3502 e->return_error_line = return_error_line;
3503 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3504 *fe = *e;
3505 /*
3506 * write barrier to synchronize with initialization
3507 * of log entry
3508 */
3509 smp_wmb();
3510 WRITE_ONCE(e->debug_id_done, t_debug_id);
3511 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3512 }
3513
3514 BUG_ON(thread->return_error.cmd != BR_OK);
3515 if (in_reply_to) {
3516 binder_restore_priority(current, in_reply_to->saved_priority);
3517 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3518 binder_enqueue_thread_work(thread, &thread->return_error.work);
3519 binder_send_failed_reply(in_reply_to, return_error);
3520 } else {
3521 thread->return_error.cmd = return_error;
3522 binder_enqueue_thread_work(thread, &thread->return_error.work);
3523 }
3524}
3525
3526static int binder_thread_write(struct binder_proc *proc,
3527 struct binder_thread *thread,
3528 binder_uintptr_t binder_buffer, size_t size,
3529 binder_size_t *consumed)
3530{
3531 uint32_t cmd;
3532 struct binder_context *context = proc->context;
3533 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3534 void __user *ptr = buffer + *consumed;
3535 void __user *end = buffer + size;
3536
3537 while (ptr < end && thread->return_error.cmd == BR_OK) {
3538 int ret;
3539
3540 if (get_user(cmd, (uint32_t __user *)ptr))
3541 return -EFAULT;
3542 ptr += sizeof(uint32_t);
3543 trace_binder_command(cmd);
3544 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3545 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3546 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3547 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3548 }
3549 switch (cmd) {
3550 case BC_INCREFS:
3551 case BC_ACQUIRE:
3552 case BC_RELEASE:
3553 case BC_DECREFS: {
3554 uint32_t target;
3555 const char *debug_string;
3556 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3557 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3558 struct binder_ref_data rdata;
3559
3560 if (get_user(target, (uint32_t __user *)ptr))
3561 return -EFAULT;
3562
3563 ptr += sizeof(uint32_t);
3564 ret = -1;
3565 if (increment && !target) {
3566 struct binder_node *ctx_mgr_node;
3567 mutex_lock(&context->context_mgr_node_lock);
3568 ctx_mgr_node = context->binder_context_mgr_node;
3569 if (ctx_mgr_node) {
3570 if (ctx_mgr_node->proc == proc) {
3571 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3572 proc->pid, thread->pid);
3573 mutex_unlock(&context->context_mgr_node_lock);
3574 return -EINVAL;
3575 }
3576 ret = binder_inc_ref_for_node(
3577 proc, ctx_mgr_node,
3578 strong, NULL, &rdata);
3579 }
3580 mutex_unlock(&context->context_mgr_node_lock);
3581 }
3582 if (ret)
3583 ret = binder_update_ref_for_handle(
3584 proc, target, increment, strong,
3585 &rdata);
3586 if (!ret && rdata.desc != target) {
3587 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3588 proc->pid, thread->pid,
3589 target, rdata.desc);
3590 }
3591 switch (cmd) {
3592 case BC_INCREFS:
3593 debug_string = "IncRefs";
3594 break;
3595 case BC_ACQUIRE:
3596 debug_string = "Acquire";
3597 break;
3598 case BC_RELEASE:
3599 debug_string = "Release";
3600 break;
3601 case BC_DECREFS:
3602 default:
3603 debug_string = "DecRefs";
3604 break;
3605 }
3606 if (ret) {
3607 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3608 proc->pid, thread->pid, debug_string,
3609 strong, target, ret);
3610 break;
3611 }
3612 binder_debug(BINDER_DEBUG_USER_REFS,
3613 "%d:%d %s ref %d desc %d s %d w %d\n",
3614 proc->pid, thread->pid, debug_string,
3615 rdata.debug_id, rdata.desc, rdata.strong,
3616 rdata.weak);
3617 break;
3618 }
3619 case BC_INCREFS_DONE:
3620 case BC_ACQUIRE_DONE: {
3621 binder_uintptr_t node_ptr;
3622 binder_uintptr_t cookie;
3623 struct binder_node *node;
3624 bool free_node;
3625
3626 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3627 return -EFAULT;
3628 ptr += sizeof(binder_uintptr_t);
3629 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3630 return -EFAULT;
3631 ptr += sizeof(binder_uintptr_t);
3632 node = binder_get_node(proc, node_ptr);
3633 if (node == NULL) {
3634 binder_user_error("%d:%d %s u%016llx no match\n",
3635 proc->pid, thread->pid,
3636 cmd == BC_INCREFS_DONE ?
3637 "BC_INCREFS_DONE" :
3638 "BC_ACQUIRE_DONE",
3639 (u64)node_ptr);
3640 break;
3641 }
3642 if (cookie != node->cookie) {
3643 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3644 proc->pid, thread->pid,
3645 cmd == BC_INCREFS_DONE ?
3646 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3647 (u64)node_ptr, node->debug_id,
3648 (u64)cookie, (u64)node->cookie);
3649 binder_put_node(node);
3650 break;
3651 }
3652 binder_node_inner_lock(node);
3653 if (cmd == BC_ACQUIRE_DONE) {
3654 if (node->pending_strong_ref == 0) {
3655 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3656 proc->pid, thread->pid,
3657 node->debug_id);
3658 binder_node_inner_unlock(node);
3659 binder_put_node(node);
3660 break;
3661 }
3662 node->pending_strong_ref = 0;
3663 } else {
3664 if (node->pending_weak_ref == 0) {
3665 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3666 proc->pid, thread->pid,
3667 node->debug_id);
3668 binder_node_inner_unlock(node);
3669 binder_put_node(node);
3670 break;
3671 }
3672 node->pending_weak_ref = 0;
3673 }
3674 free_node = binder_dec_node_nilocked(node,
3675 cmd == BC_ACQUIRE_DONE, 0);
3676 WARN_ON(free_node);
3677 binder_debug(BINDER_DEBUG_USER_REFS,
3678 "%d:%d %s node %d ls %d lw %d tr %d\n",
3679 proc->pid, thread->pid,
3680 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3681 node->debug_id, node->local_strong_refs,
3682 node->local_weak_refs, node->tmp_refs);
3683 binder_node_inner_unlock(node);
3684 binder_put_node(node);
3685 break;
3686 }
3687 case BC_ATTEMPT_ACQUIRE:
3688 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3689 return -EINVAL;
3690 case BC_ACQUIRE_RESULT:
3691 pr_err("BC_ACQUIRE_RESULT not supported\n");
3692 return -EINVAL;
3693
3694 case BC_FREE_BUFFER: {
3695 binder_uintptr_t data_ptr;
3696 struct binder_buffer *buffer;
3697
3698 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3699 return -EFAULT;
3700 ptr += sizeof(binder_uintptr_t);
3701
3702 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3703 data_ptr);
3704 if (IS_ERR_OR_NULL(buffer)) {
3705 if (PTR_ERR(buffer) == -EPERM) {
3706 binder_user_error(
3707 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3708 proc->pid, thread->pid,
3709 (u64)data_ptr);
3710 } else {
3711 binder_user_error(
3712 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3713 proc->pid, thread->pid,
3714 (u64)data_ptr);
3715 }
3716 break;
3717 }
3718 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3719 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3720 proc->pid, thread->pid, (u64)data_ptr,
3721 buffer->debug_id,
3722 buffer->transaction ? "active" : "finished");
3723
3724 binder_inner_proc_lock(proc);
3725 if (buffer->transaction) {
3726 buffer->transaction->buffer = NULL;
3727 buffer->transaction = NULL;
3728 }
3729 binder_inner_proc_unlock(proc);
3730 if (buffer->async_transaction && buffer->target_node) {
3731 struct binder_node *buf_node;
3732 struct binder_work *w;
3733
3734 buf_node = buffer->target_node;
3735 binder_node_inner_lock(buf_node);
3736 BUG_ON(!buf_node->has_async_transaction);
3737 BUG_ON(buf_node->proc != proc);
3738 w = binder_dequeue_work_head_ilocked(
3739 &buf_node->async_todo);
3740 if (!w) {
3741 buf_node->has_async_transaction = false;
3742 } else {
3743 binder_enqueue_work_ilocked(
3744 w, &proc->todo);
3745 binder_wakeup_proc_ilocked(proc);
3746 }
3747 binder_node_inner_unlock(buf_node);
3748 }
3749 trace_binder_transaction_buffer_release(buffer);
3750 binder_transaction_buffer_release(proc, buffer, NULL);
3751 binder_alloc_free_buf(&proc->alloc, buffer);
3752 break;
3753 }
3754
3755 case BC_TRANSACTION_SG:
3756 case BC_REPLY_SG: {
3757 struct binder_transaction_data_sg tr;
3758
3759 if (copy_from_user(&tr, ptr, sizeof(tr)))
3760 return -EFAULT;
3761 ptr += sizeof(tr);
3762 binder_transaction(proc, thread, &tr.transaction_data,
3763 cmd == BC_REPLY_SG, tr.buffers_size);
3764 break;
3765 }
3766 case BC_TRANSACTION:
3767 case BC_REPLY: {
3768 struct binder_transaction_data tr;
3769
3770 if (copy_from_user(&tr, ptr, sizeof(tr)))
3771 return -EFAULT;
3772 ptr += sizeof(tr);
3773 binder_transaction(proc, thread, &tr,
3774 cmd == BC_REPLY, 0);
3775 break;
3776 }
3777
3778 case BC_REGISTER_LOOPER:
3779 binder_debug(BINDER_DEBUG_THREADS,
3780 "%d:%d BC_REGISTER_LOOPER\n",
3781 proc->pid, thread->pid);
3782 binder_inner_proc_lock(proc);
3783 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3784 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3785 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3786 proc->pid, thread->pid);
3787 } else if (proc->requested_threads == 0) {
3788 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3789 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3790 proc->pid, thread->pid);
3791 } else {
3792 proc->requested_threads--;
3793 proc->requested_threads_started++;
3794 }
3795 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3796 binder_inner_proc_unlock(proc);
3797 break;
3798 case BC_ENTER_LOOPER:
3799 binder_debug(BINDER_DEBUG_THREADS,
3800 "%d:%d BC_ENTER_LOOPER\n",
3801 proc->pid, thread->pid);
3802 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3803 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3804 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3805 proc->pid, thread->pid);
3806 }
3807 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3808 break;
3809 case BC_EXIT_LOOPER:
3810 binder_debug(BINDER_DEBUG_THREADS,
3811 "%d:%d BC_EXIT_LOOPER\n",
3812 proc->pid, thread->pid);
3813 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3814 break;
3815
3816 case BC_REQUEST_DEATH_NOTIFICATION:
3817 case BC_CLEAR_DEATH_NOTIFICATION: {
3818 uint32_t target;
3819 binder_uintptr_t cookie;
3820 struct binder_ref *ref;
3821 struct binder_ref_death *death = NULL;
3822
3823 if (get_user(target, (uint32_t __user *)ptr))
3824 return -EFAULT;
3825 ptr += sizeof(uint32_t);
3826 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3827 return -EFAULT;
3828 ptr += sizeof(binder_uintptr_t);
3829 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3830 /*
3831 * Allocate memory for death notification
3832 * before taking lock
3833 */
3834 death = kzalloc(sizeof(*death), GFP_KERNEL);
3835 if (death == NULL) {
3836 WARN_ON(thread->return_error.cmd !=
3837 BR_OK);
3838 thread->return_error.cmd = BR_ERROR;
3839 binder_enqueue_thread_work(
3840 thread,
3841 &thread->return_error.work);
3842 binder_debug(
3843 BINDER_DEBUG_FAILED_TRANSACTION,
3844 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3845 proc->pid, thread->pid);
3846 break;
3847 }
3848 }
3849 binder_proc_lock(proc);
3850 ref = binder_get_ref_olocked(proc, target, false);
3851 if (ref == NULL) {
3852 binder_user_error("%d:%d %s invalid ref %d\n",
3853 proc->pid, thread->pid,
3854 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3855 "BC_REQUEST_DEATH_NOTIFICATION" :
3856 "BC_CLEAR_DEATH_NOTIFICATION",
3857 target);
3858 binder_proc_unlock(proc);
3859 kfree(death);
3860 break;
3861 }
3862
3863 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3864 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3865 proc->pid, thread->pid,
3866 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3867 "BC_REQUEST_DEATH_NOTIFICATION" :
3868 "BC_CLEAR_DEATH_NOTIFICATION",
3869 (u64)cookie, ref->data.debug_id,
3870 ref->data.desc, ref->data.strong,
3871 ref->data.weak, ref->node->debug_id);
3872
3873 binder_node_lock(ref->node);
3874 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3875 if (ref->death) {
3876 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3877 proc->pid, thread->pid);
3878 binder_node_unlock(ref->node);
3879 binder_proc_unlock(proc);
3880 kfree(death);
3881 break;
3882 }
3883 binder_stats_created(BINDER_STAT_DEATH);
3884 INIT_LIST_HEAD(&death->work.entry);
3885 death->cookie = cookie;
3886 ref->death = death;
3887 if (ref->node->proc == NULL) {
3888 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3889
3890 binder_inner_proc_lock(proc);
3891 binder_enqueue_work_ilocked(
3892 &ref->death->work, &proc->todo);
3893 binder_wakeup_proc_ilocked(proc);
3894 binder_inner_proc_unlock(proc);
3895 }
3896 } else {
3897 if (ref->death == NULL) {
3898 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3899 proc->pid, thread->pid);
3900 binder_node_unlock(ref->node);
3901 binder_proc_unlock(proc);
3902 break;
3903 }
3904 death = ref->death;
3905 if (death->cookie != cookie) {
3906 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3907 proc->pid, thread->pid,
3908 (u64)death->cookie,
3909 (u64)cookie);
3910 binder_node_unlock(ref->node);
3911 binder_proc_unlock(proc);
3912 break;
3913 }
3914 ref->death = NULL;
3915 binder_inner_proc_lock(proc);
3916 if (list_empty(&death->work.entry)) {
3917 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3918 if (thread->looper &
3919 (BINDER_LOOPER_STATE_REGISTERED |
3920 BINDER_LOOPER_STATE_ENTERED))
3921 binder_enqueue_thread_work_ilocked(
3922 thread,
3923 &death->work);
3924 else {
3925 binder_enqueue_work_ilocked(
3926 &death->work,
3927 &proc->todo);
3928 binder_wakeup_proc_ilocked(
3929 proc);
3930 }
3931 } else {
3932 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3933 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3934 }
3935 binder_inner_proc_unlock(proc);
3936 }
3937 binder_node_unlock(ref->node);
3938 binder_proc_unlock(proc);
3939 } break;
3940 case BC_DEAD_BINDER_DONE: {
3941 struct binder_work *w;
3942 binder_uintptr_t cookie;
3943 struct binder_ref_death *death = NULL;
3944
3945 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3946 return -EFAULT;
3947
3948 ptr += sizeof(cookie);
3949 binder_inner_proc_lock(proc);
3950 list_for_each_entry(w, &proc->delivered_death,
3951 entry) {
3952 struct binder_ref_death *tmp_death =
3953 container_of(w,
3954 struct binder_ref_death,
3955 work);
3956
3957 if (tmp_death->cookie == cookie) {
3958 death = tmp_death;
3959 break;
3960 }
3961 }
3962 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3963 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3964 proc->pid, thread->pid, (u64)cookie,
3965 death);
3966 if (death == NULL) {
3967 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3968 proc->pid, thread->pid, (u64)cookie);
3969 binder_inner_proc_unlock(proc);
3970 break;
3971 }
3972 binder_dequeue_work_ilocked(&death->work);
3973 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3974 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3975 if (thread->looper &
3976 (BINDER_LOOPER_STATE_REGISTERED |
3977 BINDER_LOOPER_STATE_ENTERED))
3978 binder_enqueue_thread_work_ilocked(
3979 thread, &death->work);
3980 else {
3981 binder_enqueue_work_ilocked(
3982 &death->work,
3983 &proc->todo);
3984 binder_wakeup_proc_ilocked(proc);
3985 }
3986 }
3987 binder_inner_proc_unlock(proc);
3988 } break;
3989
3990 default:
3991 pr_err("%d:%d unknown command %d\n",
3992 proc->pid, thread->pid, cmd);
3993 return -EINVAL;
3994 }
3995 *consumed = ptr - buffer;
3996 }
3997 return 0;
3998}
3999
4000static void binder_stat_br(struct binder_proc *proc,
4001 struct binder_thread *thread, uint32_t cmd)
4002{
4003 trace_binder_return(cmd);
4004 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4005 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4006 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4007 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4008 }
4009}
4010
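/*
 * Write one node reference command (BR_INCREFS, BR_ACQUIRE, BR_RELEASE or
 * BR_DECREFS) plus the node's ptr and cookie into the userspace read
 * buffer and advance *ptrp past it. Used by binder_thread_read() when
 * processing BINDER_WORK_NODE items.
 */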
4011static int binder_put_node_cmd(struct binder_proc *proc,
4012 struct binder_thread *thread,
4013 void __user **ptrp,
4014 binder_uintptr_t node_ptr,
4015 binder_uintptr_t node_cookie,
4016 int node_debug_id,
4017 uint32_t cmd, const char *cmd_name)
4018{
4019 void __user *ptr = *ptrp;
4020
4021 if (put_user(cmd, (uint32_t __user *)ptr))
4022 return -EFAULT;
4023 ptr += sizeof(uint32_t);
4024
4025 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4026 return -EFAULT;
4027 ptr += sizeof(binder_uintptr_t);
4028
4029 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4030 return -EFAULT;
4031 ptr += sizeof(binder_uintptr_t);
4032
4033 binder_stat_br(proc, thread, cmd);
4034 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4035 proc->pid, thread->pid, cmd_name, node_debug_id,
4036 (u64)node_ptr, (u64)node_cookie);
4037
4038 *ptrp = ptr;
4039 return 0;
4040}
4041
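/*
 * Sleep until there is work for this thread (or for the whole process if
 * @do_proc_work is set) or a signal arrives. While sleeping with
 * do_proc_work set, the thread sits on proc->waiting_threads so
 * binder_wakeup_proc_ilocked() can find it. The wait is wrapped in
 * freezer_do_not_count()/freezer_count() so the freezer does not wait for
 * a thread blocked here.
 *
 * Return: 0 when work is available, -ERESTARTSYS if interrupted.
 */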
4042static int binder_wait_for_work(struct binder_thread *thread,
4043 bool do_proc_work)
4044{
4045 DEFINE_WAIT(wait);
4046 struct binder_proc *proc = thread->proc;
4047 int ret = 0;
4048
4049 freezer_do_not_count();
4050 binder_inner_proc_lock(proc);
4051 for (;;) {
4052 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4053 if (binder_has_work_ilocked(thread, do_proc_work))
4054 break;
4055 if (do_proc_work)
4056 list_add(&thread->waiting_thread_node,
4057 &proc->waiting_threads);
4058 binder_inner_proc_unlock(proc);
4059 schedule();
4060 binder_inner_proc_lock(proc);
4061 list_del_init(&thread->waiting_thread_node);
4062 if (signal_pending(current)) {
4063 ret = -ERESTARTSYS;
4064 break;
4065 }
4066 }
4067 finish_wait(&thread->wait, &wait);
4068 binder_inner_proc_unlock(proc);
4069 freezer_count();
4070
4071 return ret;
4072}
4073
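/**
 * binder_thread_read() - fill a userspace buffer with BR_* commands
 * @proc:		binder_proc of the calling thread
 * @thread:		calling binder_thread
 * @binder_buffer:	userspace address of the read buffer
 * @size:		size of the buffer in bytes
 * @consumed:		in/out count of bytes already written
 * @non_block:		non-zero if the file was opened O_NONBLOCK
 *
 * Waits for work on the thread (or process) todo list, translates each
 * binder_work item into the corresponding BR_* command and copies it to
 * userspace. May also write BR_SPAWN_LOOPER at the start of the buffer to
 * ask userspace to start another looper thread.
 *
 * Return: 0 on success, -EAGAIN if non-blocking and no work is available,
 * or another negative errno on failure.
 */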
4074static int binder_thread_read(struct binder_proc *proc,
4075 struct binder_thread *thread,
4076 binder_uintptr_t binder_buffer, size_t size,
4077 binder_size_t *consumed, int non_block)
4078{
4079 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4080 void __user *ptr = buffer + *consumed;
4081 void __user *end = buffer + size;
4082
4083 int ret = 0;
4084 int wait_for_proc_work;
4085
4086 if (*consumed == 0) {
4087 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4088 return -EFAULT;
4089 ptr += sizeof(uint32_t);
4090 }
4091
4092retry:
4093 binder_inner_proc_lock(proc);
4094 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4095 binder_inner_proc_unlock(proc);
4096
4097 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4098
4099 trace_binder_wait_for_work(wait_for_proc_work,
4100 !!thread->transaction_stack,
4101 !binder_worklist_empty(proc, &thread->todo));
4102 if (wait_for_proc_work) {
4103 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4104 BINDER_LOOPER_STATE_ENTERED))) {
4105 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4106 proc->pid, thread->pid, thread->looper);
4107 wait_event_interruptible(binder_user_error_wait,
4108 binder_stop_on_user_error < 2);
4109 }
4110 binder_restore_priority(current, proc->default_priority);
4111 }
4112
4113 if (non_block) {
4114 if (!binder_has_work(thread, wait_for_proc_work))
4115 ret = -EAGAIN;
4116 } else {
4117 ret = binder_wait_for_work(thread, wait_for_proc_work);
4118 }
4119
4120 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4121
4122 if (ret)
4123 return ret;
4124
4125 while (1) {
4126 uint32_t cmd;
4127 struct binder_transaction_data_secctx tr;
4128 struct binder_transaction_data *trd = &tr.transaction_data;
4129 struct binder_work *w = NULL;
4130 struct list_head *list = NULL;
4131 struct binder_transaction *t = NULL;
4132 struct binder_thread *t_from;
4133 size_t trsize = sizeof(*trd);
4134
4135 binder_inner_proc_lock(proc);
4136 if (!binder_worklist_empty_ilocked(&thread->todo))
4137 list = &thread->todo;
4138 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4139 wait_for_proc_work)
4140 list = &proc->todo;
4141 else {
4142 binder_inner_proc_unlock(proc);
4143
4144 /* no data added */
4145 if (ptr - buffer == 4 && !thread->looper_need_return)
4146 goto retry;
4147 break;
4148 }
4149
4150 if (end - ptr < sizeof(tr) + 4) {
4151 binder_inner_proc_unlock(proc);
4152 break;
4153 }
4154 w = binder_dequeue_work_head_ilocked(list);
4155 if (binder_worklist_empty_ilocked(&thread->todo))
4156 thread->process_todo = false;
4157
4158 switch (w->type) {
4159 case BINDER_WORK_TRANSACTION: {
4160 binder_inner_proc_unlock(proc);
4161 t = container_of(w, struct binder_transaction, work);
4162 } break;
4163 case BINDER_WORK_RETURN_ERROR: {
4164 struct binder_error *e = container_of(
4165 w, struct binder_error, work);
4166
4167 WARN_ON(e->cmd == BR_OK);
4168 binder_inner_proc_unlock(proc);
4169 if (put_user(e->cmd, (uint32_t __user *)ptr))
4170 return -EFAULT;
4171 cmd = e->cmd;
4172 e->cmd = BR_OK;
4173 ptr += sizeof(uint32_t);
4174
4175 binder_stat_br(proc, thread, e->cmd);
4176 } break;
4177 case BINDER_WORK_TRANSACTION_COMPLETE: {
4178 binder_inner_proc_unlock(proc);
4179 cmd = BR_TRANSACTION_COMPLETE;
4180 kfree(w);
4181 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4182 if (put_user(cmd, (uint32_t __user *)ptr))
4183 return -EFAULT;
4184 ptr += sizeof(uint32_t);
4185
4186 binder_stat_br(proc, thread, cmd);
4187 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4188 "%d:%d BR_TRANSACTION_COMPLETE\n",
4189 proc->pid, thread->pid);
4190 } break;
4191 case BINDER_WORK_NODE: {
4192 struct binder_node *node = container_of(w, struct binder_node, work);
4193 int strong, weak;
4194 binder_uintptr_t node_ptr = node->ptr;
4195 binder_uintptr_t node_cookie = node->cookie;
4196 int node_debug_id = node->debug_id;
4197 int has_weak_ref;
4198 int has_strong_ref;
4199 void __user *orig_ptr = ptr;
4200
4201 BUG_ON(proc != node->proc);
4202 strong = node->internal_strong_refs ||
4203 node->local_strong_refs;
4204 weak = !hlist_empty(&node->refs) ||
4205 node->local_weak_refs ||
4206 node->tmp_refs || strong;
4207 has_strong_ref = node->has_strong_ref;
4208 has_weak_ref = node->has_weak_ref;
4209
4210 if (weak && !has_weak_ref) {
4211 node->has_weak_ref = 1;
4212 node->pending_weak_ref = 1;
4213 node->local_weak_refs++;
4214 }
4215 if (strong && !has_strong_ref) {
4216 node->has_strong_ref = 1;
4217 node->pending_strong_ref = 1;
4218 node->local_strong_refs++;
4219 }
4220 if (!strong && has_strong_ref)
4221 node->has_strong_ref = 0;
4222 if (!weak && has_weak_ref)
4223 node->has_weak_ref = 0;
4224 if (!weak && !strong) {
4225 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4226 "%d:%d node %d u%016llx c%016llx deleted\n",
4227 proc->pid, thread->pid,
4228 node_debug_id,
4229 (u64)node_ptr,
4230 (u64)node_cookie);
4231 rb_erase(&node->rb_node, &proc->nodes);
4232 binder_inner_proc_unlock(proc);
4233 binder_node_lock(node);
4234 /*
4235 * Acquire the node lock before freeing the
4236 * node to serialize with other threads that
4237 * may have been holding the node lock while
4238 * decrementing this node (avoids race where
4239 * this thread frees while the other thread
4240 * is unlocking the node after the final
4241 * decrement)
4242 */
4243 binder_node_unlock(node);
4244 binder_free_node(node);
4245 } else
4246 binder_inner_proc_unlock(proc);
4247
4248 if (weak && !has_weak_ref)
4249 ret = binder_put_node_cmd(
4250 proc, thread, &ptr, node_ptr,
4251 node_cookie, node_debug_id,
4252 BR_INCREFS, "BR_INCREFS");
4253 if (!ret && strong && !has_strong_ref)
4254 ret = binder_put_node_cmd(
4255 proc, thread, &ptr, node_ptr,
4256 node_cookie, node_debug_id,
4257 BR_ACQUIRE, "BR_ACQUIRE");
4258 if (!ret && !strong && has_strong_ref)
4259 ret = binder_put_node_cmd(
4260 proc, thread, &ptr, node_ptr,
4261 node_cookie, node_debug_id,
4262 BR_RELEASE, "BR_RELEASE");
4263 if (!ret && !weak && has_weak_ref)
4264 ret = binder_put_node_cmd(
4265 proc, thread, &ptr, node_ptr,
4266 node_cookie, node_debug_id,
4267 BR_DECREFS, "BR_DECREFS");
4268 if (orig_ptr == ptr)
4269 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4270 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4271 proc->pid, thread->pid,
4272 node_debug_id,
4273 (u64)node_ptr,
4274 (u64)node_cookie);
4275 if (ret)
4276 return ret;
4277 } break;
4278 case BINDER_WORK_DEAD_BINDER:
4279 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4280 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4281 struct binder_ref_death *death;
4282 uint32_t cmd;
4283 binder_uintptr_t cookie;
4284
4285 death = container_of(w, struct binder_ref_death, work);
4286 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4287 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4288 else
4289 cmd = BR_DEAD_BINDER;
4290 cookie = death->cookie;
4291
4292 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4293 "%d:%d %s %016llx\n",
4294 proc->pid, thread->pid,
4295 cmd == BR_DEAD_BINDER ?
4296 "BR_DEAD_BINDER" :
4297 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4298 (u64)cookie);
4299 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4300 binder_inner_proc_unlock(proc);
4301 kfree(death);
4302 binder_stats_deleted(BINDER_STAT_DEATH);
4303 } else {
4304 binder_enqueue_work_ilocked(
4305 w, &proc->delivered_death);
4306 binder_inner_proc_unlock(proc);
4307 }
4308 if (put_user(cmd, (uint32_t __user *)ptr))
4309 return -EFAULT;
4310 ptr += sizeof(uint32_t);
4311 if (put_user(cookie,
4312 (binder_uintptr_t __user *)ptr))
4313 return -EFAULT;
4314 ptr += sizeof(binder_uintptr_t);
4315 binder_stat_br(proc, thread, cmd);
4316 if (cmd == BR_DEAD_BINDER)
4317 goto done; /* DEAD_BINDER notifications can cause transactions */
4318 } break;
4319 }
4320
4321 if (!t)
4322 continue;
4323
4324 BUG_ON(t->buffer == NULL);
4325 if (t->buffer->target_node) {
4326 struct binder_node *target_node = t->buffer->target_node;
4327 struct binder_priority node_prio;
4328
4329 trd->target.ptr = target_node->ptr;
4330 trd->cookie = target_node->cookie;
4331 node_prio.sched_policy = target_node->sched_policy;
4332 node_prio.prio = target_node->min_priority;
4333 binder_transaction_priority(current, t, node_prio,
4334 target_node->inherit_rt);
4335 cmd = BR_TRANSACTION;
4336 } else {
4337 trd->target.ptr = 0;
4338 trd->cookie = 0;
4339 cmd = BR_REPLY;
4340 }
4341 trd->code = t->code;
4342 trd->flags = t->flags;
4343 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4344
4345 t_from = binder_get_txn_from(t);
4346 if (t_from) {
4347 struct task_struct *sender = t_from->proc->tsk;
4348
4349 trd->sender_pid =
4350 task_tgid_nr_ns(sender,
4351 task_active_pid_ns(current));
4352 } else {
4353 trd->sender_pid = 0;
4354 }
4355
4356 trd->data_size = t->buffer->data_size;
4357 trd->offsets_size = t->buffer->offsets_size;
4358 trd->data.ptr.buffer = (binder_uintptr_t)
4359 ((uintptr_t)t->buffer->data +
4360 binder_alloc_get_user_buffer_offset(&proc->alloc));
4361 trd->data.ptr.offsets = trd->data.ptr.buffer +
4362 ALIGN(t->buffer->data_size,
4363 sizeof(void *));
4364
4365 tr.secctx = t->security_ctx;
4366 if (t->security_ctx) {
4367 cmd = BR_TRANSACTION_SEC_CTX;
4368 trsize = sizeof(tr);
4369 }
4370 if (put_user(cmd, (uint32_t __user *)ptr)) {
4371 if (t_from)
4372 binder_thread_dec_tmpref(t_from);
4373
4374 binder_cleanup_transaction(t, "put_user failed",
4375 BR_FAILED_REPLY);
4376
4377 return -EFAULT;
4378 }
4379 ptr += sizeof(uint32_t);
4380 if (copy_to_user(ptr, &tr, trsize)) {
4381 if (t_from)
4382 binder_thread_dec_tmpref(t_from);
4383
4384 binder_cleanup_transaction(t, "copy_to_user failed",
4385 BR_FAILED_REPLY);
4386
4387 return -EFAULT;
4388 }
4389 ptr += trsize;
4390
4391 trace_binder_transaction_received(t);
4392 binder_stat_br(proc, thread, cmd);
4393 binder_debug(BINDER_DEBUG_TRANSACTION,
4394 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4395 proc->pid, thread->pid,
4396 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4397 (cmd == BR_TRANSACTION_SEC_CTX) ?
4398 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4399 t->debug_id, t_from ? t_from->proc->pid : 0,
4400 t_from ? t_from->pid : 0, cmd,
4401 t->buffer->data_size, t->buffer->offsets_size,
4402 (u64)trd->data.ptr.buffer,
4403 (u64)trd->data.ptr.offsets);
4404
4405 if (t_from)
4406 binder_thread_dec_tmpref(t_from);
4407 t->buffer->allow_user_free = 1;
4408 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4409 binder_inner_proc_lock(thread->proc);
4410 t->to_parent = thread->transaction_stack;
4411 t->to_thread = thread;
4412 thread->transaction_stack = t;
4413 binder_inner_proc_unlock(thread->proc);
4414 } else {
4415 binder_free_transaction(t);
4416 }
4417 break;
4418 }
4419
4420done:
4421
4422 *consumed = ptr - buffer;
4423 binder_inner_proc_lock(proc);
4424 if (proc->requested_threads == 0 &&
4425 list_empty(&thread->proc->waiting_threads) &&
4426 proc->requested_threads_started < proc->max_threads &&
4427 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4428 		     BINDER_LOOPER_STATE_ENTERED))
4429 		     /* userspace fails to spawn a new thread if we leave this out */) {
4430 proc->requested_threads++;
4431 binder_inner_proc_unlock(proc);
4432 binder_debug(BINDER_DEBUG_THREADS,
4433 "%d:%d BR_SPAWN_LOOPER\n",
4434 proc->pid, thread->pid);
4435 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4436 return -EFAULT;
4437 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4438 } else
4439 binder_inner_proc_unlock(proc);
4440 return 0;
4441}
4442
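/*
 * Drain a todo list for a dying thread or process: undelivered
 * transactions are aborted with BR_DEAD_REPLY, TRANSACTION_COMPLETE and
 * death-notification work items are freed, and undelivered return errors
 * are only logged (they are embedded in the thread struct).
 */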
4443static void binder_release_work(struct binder_proc *proc,
4444 struct list_head *list)
4445{
4446 struct binder_work *w;
4447
4448 while (1) {
4449 w = binder_dequeue_work_head(proc, list);
4450 if (!w)
4451 return;
4452
4453 switch (w->type) {
4454 case BINDER_WORK_TRANSACTION: {
4455 struct binder_transaction *t;
4456
4457 t = container_of(w, struct binder_transaction, work);
4458
4459 binder_cleanup_transaction(t, "process died.",
4460 BR_DEAD_REPLY);
4461 } break;
4462 case BINDER_WORK_RETURN_ERROR: {
4463 struct binder_error *e = container_of(
4464 w, struct binder_error, work);
4465
4466 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4467 "undelivered TRANSACTION_ERROR: %u\n",
4468 e->cmd);
4469 } break;
4470 case BINDER_WORK_TRANSACTION_COMPLETE: {
4471 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4472 "undelivered TRANSACTION_COMPLETE\n");
4473 kfree(w);
4474 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4475 } break;
4476 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4477 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4478 struct binder_ref_death *death;
4479
4480 death = container_of(w, struct binder_ref_death, work);
4481 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4482 "undelivered death notification, %016llx\n",
4483 (u64)death->cookie);
4484 kfree(death);
4485 binder_stats_deleted(BINDER_STAT_DEATH);
4486 } break;
4487 default:
4488 pr_err("unexpected work type, %d, not freed\n",
4489 w->type);
4490 break;
4491 }
4492 }
4493
4494}
4495
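/*
 * Find the binder_thread for current in proc->threads (an rbtree keyed by
 * pid). If there is no entry and @new_thread is non-NULL, initialize
 * @new_thread for current, insert it and return it; with a NULL
 * @new_thread a miss simply returns NULL. Caller holds the inner proc
 * lock.
 */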
4496static struct binder_thread *binder_get_thread_ilocked(
4497 struct binder_proc *proc, struct binder_thread *new_thread)
4498{
4499 struct binder_thread *thread = NULL;
4500 struct rb_node *parent = NULL;
4501 struct rb_node **p = &proc->threads.rb_node;
4502
4503 while (*p) {
4504 parent = *p;
4505 thread = rb_entry(parent, struct binder_thread, rb_node);
4506
4507 if (current->pid < thread->pid)
4508 p = &(*p)->rb_left;
4509 else if (current->pid > thread->pid)
4510 p = &(*p)->rb_right;
4511 else
4512 return thread;
4513 }
4514 if (!new_thread)
4515 return NULL;
4516 thread = new_thread;
4517 binder_stats_created(BINDER_STAT_THREAD);
4518 thread->proc = proc;
4519 thread->pid = current->pid;
4520 get_task_struct(current);
4521 thread->task = current;
4522 atomic_set(&thread->tmp_ref, 0);
4523 init_waitqueue_head(&thread->wait);
4524 INIT_LIST_HEAD(&thread->todo);
4525 rb_link_node(&thread->rb_node, parent, p);
4526 rb_insert_color(&thread->rb_node, &proc->threads);
4527 thread->looper_need_return = true;
4528 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4529 thread->return_error.cmd = BR_OK;
4530 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4531 thread->reply_error.cmd = BR_OK;
4532 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4533 return thread;
4534}
4535
4536static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4537{
4538 struct binder_thread *thread;
4539 struct binder_thread *new_thread;
4540
4541 binder_inner_proc_lock(proc);
4542 thread = binder_get_thread_ilocked(proc, NULL);
4543 binder_inner_proc_unlock(proc);
4544 if (!thread) {
4545 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4546 if (new_thread == NULL)
4547 return NULL;
4548 binder_inner_proc_lock(proc);
4549 thread = binder_get_thread_ilocked(proc, new_thread);
4550 binder_inner_proc_unlock(proc);
4551 if (thread != new_thread)
4552 kfree(new_thread);
4553 }
4554 return thread;
4555}
4556
4557static void binder_free_proc(struct binder_proc *proc)
4558{
4559 BUG_ON(!list_empty(&proc->todo));
4560 BUG_ON(!list_empty(&proc->delivered_death));
4561 binder_alloc_deferred_release(&proc->alloc);
4562 put_task_struct(proc->tsk);
4563 binder_stats_deleted(BINDER_STAT_PROC);
4564 kfree(proc);
4565}
4566
4567static void binder_free_thread(struct binder_thread *thread)
4568{
4569 BUG_ON(!list_empty(&thread->todo));
4570 binder_stats_deleted(BINDER_STAT_THREAD);
4571 binder_proc_dec_tmpref(thread->proc);
4572 put_task_struct(thread->task);
4573 kfree(thread);
4574}
4575
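/*
 * Release a thread for BINDER_THREAD_EXIT or process teardown: take
 * temporary refs on the proc and thread, remove the thread from
 * proc->threads, mark it dead, walk its transaction stack detaching
 * transactions that target it (a reply the peer is still waiting for gets
 * a BR_DEAD_REPLY), wake any pollers with POLLFREE and release the
 * thread's pending work.
 *
 * Return: the number of transactions that were still active.
 */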
4576static int binder_thread_release(struct binder_proc *proc,
4577 struct binder_thread *thread)
4578{
4579 struct binder_transaction *t;
4580 struct binder_transaction *send_reply = NULL;
4581 int active_transactions = 0;
4582 struct binder_transaction *last_t = NULL;
4583
4584 binder_inner_proc_lock(thread->proc);
4585 /*
4586 * take a ref on the proc so it survives
4587 * after we remove this thread from proc->threads.
4588 * The corresponding dec is when we actually
4589 * free the thread in binder_free_thread()
4590 */
4591 proc->tmp_ref++;
4592 /*
4593 * take a ref on this thread to ensure it
4594 * survives while we are releasing it
4595 */
4596 atomic_inc(&thread->tmp_ref);
4597 rb_erase(&thread->rb_node, &proc->threads);
4598 t = thread->transaction_stack;
4599 if (t) {
4600 spin_lock(&t->lock);
4601 if (t->to_thread == thread)
4602 send_reply = t;
4603 }
4604 thread->is_dead = true;
4605
4606 while (t) {
4607 last_t = t;
4608 active_transactions++;
4609 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4610 "release %d:%d transaction %d %s, still active\n",
4611 proc->pid, thread->pid,
4612 t->debug_id,
4613 (t->to_thread == thread) ? "in" : "out");
4614
4615 if (t->to_thread == thread) {
4616 t->to_proc = NULL;
4617 t->to_thread = NULL;
4618 if (t->buffer) {
4619 t->buffer->transaction = NULL;
4620 t->buffer = NULL;
4621 }
4622 t = t->to_parent;
4623 } else if (t->from == thread) {
4624 t->from = NULL;
4625 t = t->from_parent;
4626 } else
4627 BUG();
4628 spin_unlock(&last_t->lock);
4629 if (t)
4630 spin_lock(&t->lock);
4631 }
4632
4633 /*
4634 * If this thread used poll, make sure we remove the waitqueue
4635 * from any epoll data structures holding it with POLLFREE.
4636 * waitqueue_active() is safe to use here because we're holding
4637 * the inner lock.
4638 */
4639 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4640 waitqueue_active(&thread->wait)) {
4641 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4642 }
4643
4644 binder_inner_proc_unlock(thread->proc);
4645
4646 /*
4647 * This is needed to avoid races between wake_up_poll() above and
4648 	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4649 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4650 * lock, so we can be sure it's done after calling synchronize_rcu().
4651 */
4652 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4653 synchronize_rcu();
4654
4655 if (send_reply)
4656 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4657 binder_release_work(proc, &thread->todo);
4658 binder_thread_dec_tmpref(thread);
4659 return active_transactions;
4660}
4661
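/*
 * poll() support: record that the thread uses poll (so release knows to
 * wake it with POLLFREE), register on the thread's waitqueue and report
 * POLLIN when the thread (or the process, if this thread may handle
 * process work) has something on its todo list.
 */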
4662static unsigned int binder_poll(struct file *filp,
4663 struct poll_table_struct *wait)
4664{
4665 struct binder_proc *proc = filp->private_data;
4666 struct binder_thread *thread = NULL;
4667 bool wait_for_proc_work;
4668
4669 thread = binder_get_thread(proc);
4670 if (!thread)
4671 return POLLERR;
4672
4673 binder_inner_proc_lock(thread->proc);
4674 thread->looper |= BINDER_LOOPER_STATE_POLL;
4675 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4676
4677 binder_inner_proc_unlock(thread->proc);
4678
4679 poll_wait(filp, &thread->wait, wait);
4680
4681 if (binder_has_work(thread, wait_for_proc_work))
4682 return POLLIN;
4683
4684 return 0;
4685}
4686
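/*
 * Handle BINDER_WRITE_READ: copy a struct binder_write_read from
 * userspace, run binder_thread_write() on the write buffer and/or
 * binder_thread_read() on the read buffer, then copy the struct back so
 * userspace can see write_consumed/read_consumed.
 *
 * Illustrative userspace usage (a sketch, not part of the driver; assumes
 * fd was obtained by opening a binder device and mmap()ing it, and
 * cmds/replies are caller-provided buffers with placeholder names):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size = cmds_len,
 *		.read_buffer = (binder_uintptr_t)replies,
 *		.read_size = sizeof(replies),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed report progress
 */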
4687static int binder_ioctl_write_read(struct file *filp,
4688 unsigned int cmd, unsigned long arg,
4689 struct binder_thread *thread)
4690{
4691 int ret = 0;
4692 struct binder_proc *proc = filp->private_data;
4693 unsigned int size = _IOC_SIZE(cmd);
4694 void __user *ubuf = (void __user *)arg;
4695 struct binder_write_read bwr;
4696
4697 if (size != sizeof(struct binder_write_read)) {
4698 ret = -EINVAL;
4699 goto out;
4700 }
4701 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4702 ret = -EFAULT;
4703 goto out;
4704 }
4705 binder_debug(BINDER_DEBUG_READ_WRITE,
4706 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4707 proc->pid, thread->pid,
4708 (u64)bwr.write_size, (u64)bwr.write_buffer,
4709 (u64)bwr.read_size, (u64)bwr.read_buffer);
4710
4711 if (bwr.write_size > 0) {
4712 ret = binder_thread_write(proc, thread,
4713 bwr.write_buffer,
4714 bwr.write_size,
4715 &bwr.write_consumed);
4716 trace_binder_write_done(ret);
4717 if (ret < 0) {
4718 bwr.read_consumed = 0;
4719 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4720 ret = -EFAULT;
4721 goto out;
4722 }
4723 }
4724 if (bwr.read_size > 0) {
4725 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4726 bwr.read_size,
4727 &bwr.read_consumed,
4728 filp->f_flags & O_NONBLOCK);
4729 trace_binder_read_done(ret);
4730 binder_inner_proc_lock(proc);
4731 if (!binder_worklist_empty_ilocked(&proc->todo))
4732 binder_wakeup_proc_ilocked(proc);
4733 binder_inner_proc_unlock(proc);
4734 if (ret < 0) {
4735 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4736 ret = -EFAULT;
4737 goto out;
4738 }
4739 }
4740 binder_debug(BINDER_DEBUG_READ_WRITE,
4741 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4742 proc->pid, thread->pid,
4743 (u64)bwr.write_consumed, (u64)bwr.write_size,
4744 (u64)bwr.read_consumed, (u64)bwr.read_size);
4745 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4746 ret = -EFAULT;
4747 goto out;
4748 }
4749out:
4750 return ret;
4751}
4752
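/*
 * Handle BINDER_SET_CONTEXT_MGR{,_EXT}: after an LSM check, make the
 * calling process the context manager for this binder device by creating
 * the context manager node (the node reachable through handle 0). Fails
 * with -EBUSY if a context manager is already registered, or -EPERM if a
 * different euid previously claimed the role.
 */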
4753static int binder_ioctl_set_ctx_mgr(struct file *filp,
4754 struct flat_binder_object *fbo)
4755{
4756 int ret = 0;
4757 struct binder_proc *proc = filp->private_data;
4758 struct binder_context *context = proc->context;
4759 struct binder_node *new_node;
4760 kuid_t curr_euid = current_euid();
4761
4762 mutex_lock(&context->context_mgr_node_lock);
4763 if (context->binder_context_mgr_node) {
4764 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4765 ret = -EBUSY;
4766 goto out;
4767 }
4768 ret = security_binder_set_context_mgr(proc->tsk);
4769 if (ret < 0)
4770 goto out;
4771 if (uid_valid(context->binder_context_mgr_uid)) {
4772 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4773 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4774 from_kuid(&init_user_ns, curr_euid),
4775 from_kuid(&init_user_ns,
4776 context->binder_context_mgr_uid));
4777 ret = -EPERM;
4778 goto out;
4779 }
4780 } else {
4781 context->binder_context_mgr_uid = curr_euid;
4782 }
4783 new_node = binder_new_node(proc, fbo);
4784 if (!new_node) {
4785 ret = -ENOMEM;
4786 goto out;
4787 }
4788 binder_node_lock(new_node);
4789 new_node->local_weak_refs++;
4790 new_node->local_strong_refs++;
4791 new_node->has_strong_ref = 1;
4792 new_node->has_weak_ref = 1;
4793 context->binder_context_mgr_node = new_node;
4794 binder_node_unlock(new_node);
4795 binder_put_node(new_node);
4796out:
4797 mutex_unlock(&context->context_mgr_node_lock);
4798 return ret;
4799}
4800
4801static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4802 struct binder_node_debug_info *info)
4803{
4804 struct rb_node *n;
4805 binder_uintptr_t ptr = info->ptr;
4806
4807 memset(info, 0, sizeof(*info));
4808
4809 binder_inner_proc_lock(proc);
4810 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4811 struct binder_node *node = rb_entry(n, struct binder_node,
4812 rb_node);
4813 if (node->ptr > ptr) {
4814 info->ptr = node->ptr;
4815 info->cookie = node->cookie;
4816 info->has_strong_ref = node->has_strong_ref;
4817 info->has_weak_ref = node->has_weak_ref;
4818 break;
4819 }
4820 }
4821 binder_inner_proc_unlock(proc);
4822
4823 return 0;
4824}
4825
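/*
 * Top-level ioctl dispatcher: looks up (or creates) the binder_thread for
 * the caller and handles BINDER_WRITE_READ, BINDER_SET_MAX_THREADS,
 * BINDER_SET_CONTEXT_MGR{,_EXT}, BINDER_THREAD_EXIT, BINDER_VERSION and
 * BINDER_GET_NODE_DEBUG_INFO.
 */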
4826static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4827{
4828 int ret;
4829 struct binder_proc *proc = filp->private_data;
4830 struct binder_thread *thread;
4831 unsigned int size = _IOC_SIZE(cmd);
4832 void __user *ubuf = (void __user *)arg;
4833
4834 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4835 proc->pid, current->pid, cmd, arg);*/
4836
4837 binder_selftest_alloc(&proc->alloc);
4838
4839 trace_binder_ioctl(cmd, arg);
4840
4841 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4842 if (ret)
4843 goto err_unlocked;
4844
4845 thread = binder_get_thread(proc);
4846 if (thread == NULL) {
4847 ret = -ENOMEM;
4848 goto err;
4849 }
4850
4851 switch (cmd) {
4852 case BINDER_WRITE_READ:
4853 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4854 if (ret)
4855 goto err;
4856 break;
4857 case BINDER_SET_MAX_THREADS: {
4858 int max_threads;
4859
4860 if (copy_from_user(&max_threads, ubuf,
4861 sizeof(max_threads))) {
4862 ret = -EINVAL;
4863 goto err;
4864 }
4865 binder_inner_proc_lock(proc);
4866 proc->max_threads = max_threads;
4867 binder_inner_proc_unlock(proc);
4868 break;
4869 }
4870 case BINDER_SET_CONTEXT_MGR_EXT: {
4871 struct flat_binder_object fbo;
4872
4873 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4874 ret = -EINVAL;
4875 goto err;
4876 }
4877 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4878 if (ret)
4879 goto err;
4880 break;
4881 }
4882 case BINDER_SET_CONTEXT_MGR:
4883 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4884 if (ret)
4885 goto err;
4886 break;
4887 case BINDER_THREAD_EXIT:
4888 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4889 proc->pid, thread->pid);
4890 binder_thread_release(proc, thread);
4891 thread = NULL;
4892 break;
4893 case BINDER_VERSION: {
4894 struct binder_version __user *ver = ubuf;
4895
4896 if (size != sizeof(struct binder_version)) {
4897 ret = -EINVAL;
4898 goto err;
4899 }
4900 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4901 &ver->protocol_version)) {
4902 ret = -EINVAL;
4903 goto err;
4904 }
4905 break;
4906 }
4907 case BINDER_GET_NODE_DEBUG_INFO: {
4908 struct binder_node_debug_info info;
4909
4910 if (copy_from_user(&info, ubuf, sizeof(info))) {
4911 ret = -EFAULT;
4912 goto err;
4913 }
4914
4915 ret = binder_ioctl_get_node_debug_info(proc, &info);
4916 if (ret < 0)
4917 goto err;
4918
4919 if (copy_to_user(ubuf, &info, sizeof(info))) {
4920 ret = -EFAULT;
4921 goto err;
4922 }
4923 break;
4924 }
4925 default:
4926 ret = -EINVAL;
4927 goto err;
4928 }
4929 ret = 0;
4930err:
4931 if (thread)
4932 thread->looper_need_return = false;
4933 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4934 if (ret && ret != -ERESTARTSYS)
4935 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4936err_unlocked:
4937 trace_binder_ioctl_done(ret);
4938 return ret;
4939}
4940
4941static void binder_vma_open(struct vm_area_struct *vma)
4942{
4943 struct binder_proc *proc = vma->vm_private_data;
4944
4945 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4946 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4947 proc->pid, vma->vm_start, vma->vm_end,
4948 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4949 (unsigned long)pgprot_val(vma->vm_page_prot));
4950}
4951
4952static void binder_vma_close(struct vm_area_struct *vma)
4953{
4954 struct binder_proc *proc = vma->vm_private_data;
4955
4956 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4957 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4958 proc->pid, vma->vm_start, vma->vm_end,
4959 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4960 (unsigned long)pgprot_val(vma->vm_page_prot));
4961 binder_alloc_vma_close(&proc->alloc);
4962 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4963}
4964
4965static int binder_vm_fault(struct vm_fault *vmf)
4966{
4967 return VM_FAULT_SIGBUS;
4968}
4969
4970static const struct vm_operations_struct binder_vm_ops = {
4971 .open = binder_vma_open,
4972 .close = binder_vma_close,
4973 .fault = binder_vm_fault,
4974};
4975
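/*
 * Set up the per-process buffer area: the mapping is capped at 4 MB, must
 * not be writable (transaction data is written only by the kernel) and is
 * handed to binder_alloc for page management. Only the process that opened
 * the device (the group leader recorded in proc->tsk) may mmap it; on
 * success the process' files_struct is pinned in proc->files.
 */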
4976static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4977{
4978 int ret;
4979 struct binder_proc *proc = filp->private_data;
4980 const char *failure_string;
4981
4982 if (proc->tsk != current->group_leader)
4983 return -EINVAL;
4984
4985 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4986 vma->vm_end = vma->vm_start + SZ_4M;
4987
4988 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4989 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4990 __func__, proc->pid, vma->vm_start, vma->vm_end,
4991 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4992 (unsigned long)pgprot_val(vma->vm_page_prot));
4993
4994 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4995 ret = -EPERM;
4996 failure_string = "bad vm_flags";
4997 goto err_bad_arg;
4998 }
4999 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5000 vma->vm_flags &= ~VM_MAYWRITE;
5001
5002 vma->vm_ops = &binder_vm_ops;
5003 vma->vm_private_data = proc;
5004
5005 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5006 if (ret)
5007 return ret;
5008 mutex_lock(&proc->files_lock);
5009 proc->files = get_files_struct(current);
5010 mutex_unlock(&proc->files_lock);
5011 return 0;
5012
5013err_bad_arg:
5014 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5015 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5016 return ret;
5017}
5018
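/*
 * Create and initialize a binder_proc for the opening process: record the
 * group leader and its default scheduling priority, bind the proc to the
 * binder_device that was opened, initialize the allocator, add the proc to
 * the global binder_procs list and create its debugfs entry.
 */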
5019static int binder_open(struct inode *nodp, struct file *filp)
5020{
5021 struct binder_proc *proc;
5022 struct binder_device *binder_dev;
5023
5024 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5025 current->group_leader->pid, current->pid);
5026
5027 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5028 if (proc == NULL)
5029 return -ENOMEM;
5030 spin_lock_init(&proc->inner_lock);
5031 spin_lock_init(&proc->outer_lock);
5032 get_task_struct(current->group_leader);
5033 proc->tsk = current->group_leader;
5034 mutex_init(&proc->files_lock);
5035 INIT_LIST_HEAD(&proc->todo);
5036 if (binder_supported_policy(current->policy)) {
5037 proc->default_priority.sched_policy = current->policy;
5038 proc->default_priority.prio = current->normal_prio;
5039 } else {
5040 proc->default_priority.sched_policy = SCHED_NORMAL;
5041 proc->default_priority.prio = NICE_TO_PRIO(0);
5042 }
5043
5044 binder_dev = container_of(filp->private_data, struct binder_device,
5045 miscdev);
5046 proc->context = &binder_dev->context;
5047 binder_alloc_init(&proc->alloc);
5048
5049 binder_stats_created(BINDER_STAT_PROC);
5050 proc->pid = current->group_leader->pid;
5051 INIT_LIST_HEAD(&proc->delivered_death);
5052 INIT_LIST_HEAD(&proc->waiting_threads);
5053 filp->private_data = proc;
5054
5055 mutex_lock(&binder_procs_lock);
5056 hlist_add_head(&proc->proc_node, &binder_procs);
5057 mutex_unlock(&binder_procs_lock);
5058
5059 if (binder_debugfs_dir_entry_proc) {
5060 char strbuf[11];
5061
5062 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5063 /*
5064 * proc debug entries are shared between contexts, so
5065 * this will fail if the process tries to open the driver
5066 		 * again with a different context. The printing code will
5067 		 * print all contexts that a given PID has anyway, so this
5068 		 * is not a problem.
5069 */
5070 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5071 binder_debugfs_dir_entry_proc,
5072 (void *)(unsigned long)proc->pid,
5073 &binder_proc_fops);
5074 }
5075
5076 return 0;
5077}
5078
5079static int binder_flush(struct file *filp, fl_owner_t id)
5080{
5081 struct binder_proc *proc = filp->private_data;
5082
5083 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5084
5085 return 0;
5086}
5087
5088static void binder_deferred_flush(struct binder_proc *proc)
5089{
5090 struct rb_node *n;
5091 int wake_count = 0;
5092
5093 binder_inner_proc_lock(proc);
5094 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5095 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5096
5097 thread->looper_need_return = true;
5098 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5099 wake_up_interruptible(&thread->wait);
5100 wake_count++;
5101 }
5102 }
5103 binder_inner_proc_unlock(proc);
5104
5105 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5106 "binder_flush: %d woke %d threads\n", proc->pid,
5107 wake_count);
5108}
5109
5110static int binder_release(struct inode *nodp, struct file *filp)
5111{
5112 struct binder_proc *proc = filp->private_data;
5113
5114 debugfs_remove(proc->debugfs_entry);
5115 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5116
5117 return 0;
5118}
5119
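/*
 * Called for each node of a dying process. If nothing references the node
 * it is freed immediately; otherwise it is moved to binder_dead_nodes and
 * a BINDER_WORK_DEAD_BINDER item is queued for every ref that requested a
 * death notification.
 *
 * Return: the updated incoming reference count (@refs plus the refs seen).
 */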
5120static int binder_node_release(struct binder_node *node, int refs)
5121{
5122 struct binder_ref *ref;
5123 int death = 0;
5124 struct binder_proc *proc = node->proc;
5125
5126 binder_release_work(proc, &node->async_todo);
5127
5128 binder_node_lock(node);
5129 binder_inner_proc_lock(proc);
5130 binder_dequeue_work_ilocked(&node->work);
5131 /*
5132 	 * The caller must have taken a temporary ref on the node.
5133 */
5134 BUG_ON(!node->tmp_refs);
5135 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5136 binder_inner_proc_unlock(proc);
5137 binder_node_unlock(node);
5138 binder_free_node(node);
5139
5140 return refs;
5141 }
5142
5143 node->proc = NULL;
5144 node->local_strong_refs = 0;
5145 node->local_weak_refs = 0;
5146 binder_inner_proc_unlock(proc);
5147
5148 spin_lock(&binder_dead_nodes_lock);
5149 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5150 spin_unlock(&binder_dead_nodes_lock);
5151
5152 hlist_for_each_entry(ref, &node->refs, node_entry) {
5153 refs++;
5154 /*
5155 * Need the node lock to synchronize
5156 * with new notification requests and the
5157 * inner lock to synchronize with queued
5158 * death notifications.
5159 */
5160 binder_inner_proc_lock(ref->proc);
5161 if (!ref->death) {
5162 binder_inner_proc_unlock(ref->proc);
5163 continue;
5164 }
5165
5166 death++;
5167
5168 BUG_ON(!list_empty(&ref->death->work.entry));
5169 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5170 binder_enqueue_work_ilocked(&ref->death->work,
5171 &ref->proc->todo);
5172 binder_wakeup_proc_ilocked(ref->proc);
5173 binder_inner_proc_unlock(ref->proc);
5174 }
5175
5176 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5177 "node %d now dead, refs %d, death %d\n",
5178 node->debug_id, refs, death);
5179 binder_node_unlock(node);
5180 binder_put_node(node);
5181
5182 return refs;
5183}
5184
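/*
 * Final teardown of a binder_proc, run from the deferred workqueue after
 * the file has been released: clear the context manager if this process
 * owned it, release every thread, node and ref, and drain the remaining
 * todo and delivered_death work before dropping the proc's last temporary
 * reference (which frees it).
 */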
5185static void binder_deferred_release(struct binder_proc *proc)
5186{
5187 struct binder_context *context = proc->context;
5188 struct rb_node *n;
5189 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5190
5191 BUG_ON(proc->files);
5192
5193 mutex_lock(&binder_procs_lock);
5194 hlist_del(&proc->proc_node);
5195 mutex_unlock(&binder_procs_lock);
5196
5197 mutex_lock(&context->context_mgr_node_lock);
5198 if (context->binder_context_mgr_node &&
5199 context->binder_context_mgr_node->proc == proc) {
5200 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5201 "%s: %d context_mgr_node gone\n",
5202 __func__, proc->pid);
5203 context->binder_context_mgr_node = NULL;
5204 }
5205 mutex_unlock(&context->context_mgr_node_lock);
5206 binder_inner_proc_lock(proc);
5207 /*
5208 * Make sure proc stays alive after we
5209 * remove all the threads
5210 */
5211 proc->tmp_ref++;
5212
5213 proc->is_dead = true;
5214 threads = 0;
5215 active_transactions = 0;
5216 while ((n = rb_first(&proc->threads))) {
5217 struct binder_thread *thread;
5218
5219 thread = rb_entry(n, struct binder_thread, rb_node);
5220 binder_inner_proc_unlock(proc);
5221 threads++;
5222 active_transactions += binder_thread_release(proc, thread);
5223 binder_inner_proc_lock(proc);
5224 }
5225
5226 nodes = 0;
5227 incoming_refs = 0;
5228 while ((n = rb_first(&proc->nodes))) {
5229 struct binder_node *node;
5230
5231 node = rb_entry(n, struct binder_node, rb_node);
5232 nodes++;
5233 /*
5234 * take a temporary ref on the node before
5235 * calling binder_node_release() which will either
5236 * kfree() the node or call binder_put_node()
5237 */
5238 binder_inc_node_tmpref_ilocked(node);
5239 rb_erase(&node->rb_node, &proc->nodes);
5240 binder_inner_proc_unlock(proc);
5241 incoming_refs = binder_node_release(node, incoming_refs);
5242 binder_inner_proc_lock(proc);
5243 }
5244 binder_inner_proc_unlock(proc);
5245
5246 outgoing_refs = 0;
5247 binder_proc_lock(proc);
5248 while ((n = rb_first(&proc->refs_by_desc))) {
5249 struct binder_ref *ref;
5250
5251 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5252 outgoing_refs++;
5253 binder_cleanup_ref_olocked(ref);
5254 binder_proc_unlock(proc);
5255 binder_free_ref(ref);
5256 binder_proc_lock(proc);
5257 }
5258 binder_proc_unlock(proc);
5259
5260 binder_release_work(proc, &proc->todo);
5261 binder_release_work(proc, &proc->delivered_death);
5262
5263 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5264 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5265 __func__, proc->pid, threads, nodes, incoming_refs,
5266 outgoing_refs, active_transactions);
5267
5268 binder_proc_dec_tmpref(proc);
5269}
5270
5271static void binder_deferred_func(struct work_struct *work)
5272{
5273 struct binder_proc *proc;
5274 struct files_struct *files;
5275
5276 int defer;
5277
5278 do {
5279 mutex_lock(&binder_deferred_lock);
5280 if (!hlist_empty(&binder_deferred_list)) {
5281 proc = hlist_entry(binder_deferred_list.first,
5282 struct binder_proc, deferred_work_node);
5283 hlist_del_init(&proc->deferred_work_node);
5284 defer = proc->deferred_work;
5285 proc->deferred_work = 0;
5286 } else {
5287 proc = NULL;
5288 defer = 0;
5289 }
5290 mutex_unlock(&binder_deferred_lock);
5291
5292 files = NULL;
5293 if (defer & BINDER_DEFERRED_PUT_FILES) {
5294 mutex_lock(&proc->files_lock);
5295 files = proc->files;
5296 if (files)
5297 proc->files = NULL;
5298 mutex_unlock(&proc->files_lock);
5299 }
5300
5301 if (defer & BINDER_DEFERRED_FLUSH)
5302 binder_deferred_flush(proc);
5303
5304 if (defer & BINDER_DEFERRED_RELEASE)
5305 binder_deferred_release(proc); /* frees proc */
5306
5307 if (files)
5308 put_files_struct(files);
5309 } while (proc);
5310}
5311static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5312
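/*
 * Record deferred work (flush, put-files or release) for @proc and, if the
 * proc is not already queued, add it to binder_deferred_list and kick the
 * shared work item that runs binder_deferred_func().
 */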
5313static void
5314binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5315{
5316 mutex_lock(&binder_deferred_lock);
5317 proc->deferred_work |= defer;
5318 if (hlist_unhashed(&proc->deferred_work_node)) {
5319 hlist_add_head(&proc->deferred_work_node,
5320 &binder_deferred_list);
5321 schedule_work(&binder_deferred_work);
5322 }
5323 mutex_unlock(&binder_deferred_lock);
5324}
5325
5326static void print_binder_transaction_ilocked(struct seq_file *m,
5327 struct binder_proc *proc,
5328 const char *prefix,
5329 struct binder_transaction *t)
5330{
5331 struct binder_proc *to_proc;
5332 struct binder_buffer *buffer = t->buffer;
5333
5334 spin_lock(&t->lock);
5335 to_proc = t->to_proc;
5336 seq_printf(m,
5337 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5338 prefix, t->debug_id, t,
5339 t->from ? t->from->proc->pid : 0,
5340 t->from ? t->from->pid : 0,
5341 to_proc ? to_proc->pid : 0,
5342 t->to_thread ? t->to_thread->pid : 0,
5343 t->code, t->flags, t->priority.sched_policy,
5344 t->priority.prio, t->need_reply);
5345 spin_unlock(&t->lock);
5346
5347 if (proc != to_proc) {
5348 /*
5349 * Can only safely deref buffer if we are holding the
5350 * correct proc inner lock for this node
5351 */
5352 seq_puts(m, "\n");
5353 return;
5354 }
5355
5356 if (buffer == NULL) {
5357 seq_puts(m, " buffer free\n");
5358 return;
5359 }
5360 if (buffer->target_node)
5361 seq_printf(m, " node %d", buffer->target_node->debug_id);
5362 seq_printf(m, " size %zd:%zd data %pK\n",
5363 buffer->data_size, buffer->offsets_size,
5364 buffer->data);
5365}
5366
5367static void print_binder_work_ilocked(struct seq_file *m,
5368 struct binder_proc *proc,
5369 const char *prefix,
5370 const char *transaction_prefix,
5371 struct binder_work *w)
5372{
5373 struct binder_node *node;
5374 struct binder_transaction *t;
5375
5376 switch (w->type) {
5377 case BINDER_WORK_TRANSACTION:
5378 t = container_of(w, struct binder_transaction, work);
5379 print_binder_transaction_ilocked(
5380 m, proc, transaction_prefix, t);
5381 break;
5382 case BINDER_WORK_RETURN_ERROR: {
5383 struct binder_error *e = container_of(
5384 w, struct binder_error, work);
5385
5386 seq_printf(m, "%stransaction error: %u\n",
5387 prefix, e->cmd);
5388 } break;
5389 case BINDER_WORK_TRANSACTION_COMPLETE:
5390 seq_printf(m, "%stransaction complete\n", prefix);
5391 break;
5392 case BINDER_WORK_NODE:
5393 node = container_of(w, struct binder_node, work);
5394 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5395 prefix, node->debug_id,
5396 (u64)node->ptr, (u64)node->cookie);
5397 break;
5398 case BINDER_WORK_DEAD_BINDER:
5399 seq_printf(m, "%shas dead binder\n", prefix);
5400 break;
5401 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5402 seq_printf(m, "%shas cleared dead binder\n", prefix);
5403 break;
5404 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5405 seq_printf(m, "%shas cleared death notification\n", prefix);
5406 break;
5407 default:
5408 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5409 break;
5410 }
5411}
5412
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					" outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					" incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					" bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, " ",
					  " pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

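/*
 * Print a node's state, the pids of the processes holding references
 * to it and, while the node still has a proc, its pending async work.
 * Called with the node lock held (and the owning proc's inner lock when
 * the node is live), e.g. from print_binder_proc() and
 * binder_state_show().
 */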
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, " ",
					" pending async transaction", w);
	}
}

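/*
 * Print one ref: its descriptor, target node, strong/weak counts and
 * death notification pointer. The node lock is taken here to report
 * whether the target node's process is still alive.
 */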
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

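/*
 * Dump one process: its threads, nodes, refs (when @print_all is set),
 * allocated buffers and pending work. Temporary node references are
 * taken so each node survives while the proc inner lock is dropped to
 * acquire the node lock. If nothing beyond the header was printed and
 * @print_all is false, the output is rewound.
 */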
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, " ",
					  " pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, " has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

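/*
 * Print the non-zero BC_* command and BR_* return counters and the
 * created/deleted object statistics. The BUILD_BUG_ON()s keep the
 * string tables above in sync with the counter arrays in
 * struct binder_stats.
 */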
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

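/*
 * Per-process statistics: thread and ready-thread counts, free async
 * space, node/ref/buffer counts, pending transactions on the proc todo
 * list, and the per-process command/return/object counters.
 */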
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, " threads: %d\n", count);
	seq_printf(m, " requested threads: %d+%d/%d\n"
			" ready threads %d\n"
			" free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, " nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, " buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, " pending transactions: %d\n", count);

	print_binder_stats(m, " ", &proc->stats);
}


static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

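/*
 * Print one transaction log entry. debug_id_done is sampled before and
 * after the fields are printed, with smp_rmb() pairing against the log
 * writer, so an entry that was being overwritten while we printed it is
 * flagged as "(incomplete)" rather than presented as consistent.
 */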
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

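/*
 * Dump the circular transaction log. When the log has wrapped (or is
 * marked full), printing starts at the oldest entry so the records come
 * out in chronological order; otherwise it starts at index 0 and stops
 * at the most recently used slot.
 */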
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

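/*
 * Allocate and register one binder misc device (e.g. /dev/binder) with a
 * dynamically assigned minor, initialize its context and add it to the
 * global binder_devices list. Returns 0 on success or the misc_register()
 * error, in which case the allocation is freed.
 */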
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

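/*
 * Module init: set up the binder_alloc shrinker, create the debugfs
 * entries (state, stats, transactions and the two transaction logs),
 * then register one binder device per comma-separated name in the
 * binder_devices_param module parameter. On failure, any devices that
 * were already registered are torn down again.
 */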
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");