/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
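/*
 * Editorial sketch (not part of the original source): under the suffix
 * convention above, a hypothetical helper foo_ilocked() is entered with
 * proc->inner_lock already held:
 *
 *	binder_inner_proc_lock(proc);
 *	foo_ilocked(proc);		// may walk proc->todo safely
 *	binder_inner_proc_unlock(proc);
 *
 * and nesting always proceeds outer -> node -> inner:
 *
 *	binder_proc_lock(proc);
 *	binder_node_lock(node);
 *	binder_inner_proc_lock(proc);
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */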
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <uapi/linux/android/binder.h>

#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
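/*
 * Editorial sketch (not part of the original source): these container_of()
 * wrappers recover the enclosing object from a validated header pointer:
 *
 *	struct binder_object_header *hdr;	// points into a buffer
 *	...
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		// fdo->fd is now accessible
 *	}
 */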
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
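/*
 * Editorial note (not in the original source): the log is a fixed-size
 * ring. Since the statics above are zero-initialized, the Nth call to
 * binder_transaction_log_add() sees cur == N; with 32 entries, call 35
 * reuses slot 35 % 32 == 3, and log->full stays set once cur reaches 32.
 */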
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:  scheduler policy
 * @prio:          [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	atomic_t tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
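/*
 * Editorial sketch (not part of the original source): a typical producer
 * path queues work and then wakes the chosen thread while still holding
 * the inner lock, e.g. for a transaction t:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 */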
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
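/*
 * Editorial example (not in the original source): for SCHED_NORMAL,
 * to_kernel_prio() is NICE_TO_PRIO(), so nice 0 maps to kernel prio 120
 * and nice -20 to 100. For SCHED_FIFO/SCHED_RR the range is inverted:
 * userspace rtprio 99 maps to kernel prio MAX_USER_RT_PRIO - 1 - 99 == 0,
 * and to_userspace_prio() undoes the mapping.
 */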
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			      task->pid, desired.prio,
			      to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
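/*
 * Editorial example (not in the original source): if the caller runs at
 * SCHED_NORMAL prio 120 and the node was published with a minimum of
 * SCHED_FIFO prio 98, then node_prio.prio (98) < t->priority.prio (120),
 * so the target handles the transaction at the node minimum; the values
 * saved in t->saved_priority let binder_restore_priority() undo the
 * boost afterwards.
 */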
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
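/*
 * Editorial example (not in the original source): descriptors are handed
 * out lowest-first by the scan above. If the proc already holds descs
 * {1, 2, 4}, a new ref starts at 1, is bumped to 2 and then 3, and the
 * loop breaks at 4, so the new ref gets desc 3 (desc 0 is reserved for
 * refs to the context manager node).
 */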
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	atomic_dec(&proc->tmp_ref);
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !atomic_read(&proc->tmp_ref)) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
2119 static void binder_free_transaction(struct binder_transaction *t)
2121 struct binder_proc *target_proc;
2123 spin_lock(&t->lock);
2124 target_proc = t->to_proc;
2125 if (target_proc) {
2126 atomic_inc(&target_proc->tmp_ref);
2127 spin_unlock(&t->lock);
2129 binder_inner_proc_lock(target_proc);
2130 if (t->buffer)
2131 t->buffer->transaction = NULL;
2132 binder_inner_proc_unlock(target_proc);
2133 binder_proc_dec_tmpref(target_proc);
2134 } else {
2135 /*
2136 * If the transaction has no target_proc, then
2137 * t->buffer->transaction has already been cleared.
2138 */
2139 spin_unlock(&t->lock);
2140 }
2141 kfree(t);
2142 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2145 static void binder_send_failed_reply(struct binder_transaction *t,
2146 uint32_t error_code)
2148 struct binder_thread *target_thread;
2149 struct binder_transaction *next;
2151 BUG_ON(t->flags & TF_ONE_WAY);
2153 target_thread = binder_get_txn_from_and_acq_inner(t);
2154 if (target_thread) {
2155 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2156 "send failed reply for transaction %d to %d:%d\n",
2158 target_thread->proc->pid,
2159 target_thread->pid);
2161 binder_pop_transaction_ilocked(target_thread, t);
2162 if (target_thread->reply_error.cmd == BR_OK) {
2163 target_thread->reply_error.cmd = error_code;
2164 binder_enqueue_thread_work_ilocked(
2166 &target_thread->reply_error.work);
2167 wake_up_interruptible(&target_thread->wait);
2168 } else {
2169 /*
2170 * Cannot get here for normal operation, but
2171 * we can if multiple synchronous transactions
2172 * are sent without blocking for responses.
2173 * Just ignore the 2nd error in this case.
2174 */
2175 pr_warn("Unexpected reply error: %u\n",
2176 target_thread->reply_error.cmd);
2178 binder_inner_proc_unlock(target_thread->proc);
2179 binder_thread_dec_tmpref(target_thread);
2180 binder_free_transaction(t);
2181 return;
2182 }
2183 next = t->from_parent;
2185 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2186 "send failed reply for transaction %d, target dead\n",
2189 binder_free_transaction(t);
2191 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2192 "reply failed, no target thread at root\n");
2196 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2197 "reply failed, no target thread -- retry %d\n",
2203 * binder_cleanup_transaction() - cleans up undelivered transaction
2204 * @t: transaction that needs to be cleaned up
2205 * @reason: reason the transaction wasn't delivered
2206 * @error_code: error to return to caller (if synchronous call)
2208 static void binder_cleanup_transaction(struct binder_transaction *t,
2209 const char *reason,
2210 uint32_t error_code)
2212 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2213 binder_send_failed_reply(t, error_code);
2215 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2216 "undelivered transaction %d, %s\n",
2217 t->debug_id, reason);
2218 binder_free_transaction(t);
2223 * binder_validate_object() - checks for a valid metadata object in a buffer.
2224 * @buffer: binder_buffer that we're parsing.
2225 * @offset: offset in the buffer at which to validate an object.
2227 * Return: If there's a valid metadata object at @offset in @buffer, the
2228 * size of that object. Otherwise, it returns zero.
2230 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2232 /* Check if we can read a header first */
2233 struct binder_object_header *hdr;
2234 size_t object_size = 0;
2236 if (buffer->data_size < sizeof(*hdr) ||
2237 offset > buffer->data_size - sizeof(*hdr) ||
2238 !IS_ALIGNED(offset, sizeof(u32)))
2239 return 0;
2241 /* Ok, now see if we can read a complete object. */
2242 hdr = (struct binder_object_header *)(buffer->data + offset);
2243 switch (hdr->type) {
2244 case BINDER_TYPE_BINDER:
2245 case BINDER_TYPE_WEAK_BINDER:
2246 case BINDER_TYPE_HANDLE:
2247 case BINDER_TYPE_WEAK_HANDLE:
2248 object_size = sizeof(struct flat_binder_object);
2249 break;
2250 case BINDER_TYPE_FD:
2251 object_size = sizeof(struct binder_fd_object);
2252 break;
2253 case BINDER_TYPE_PTR:
2254 object_size = sizeof(struct binder_buffer_object);
2255 break;
2256 case BINDER_TYPE_FDA:
2257 object_size = sizeof(struct binder_fd_array_object);
2258 break;
2259 default:
2260 return 0;
2261 }
2262 if (offset <= buffer->data_size - object_size &&
2263 buffer->data_size >= object_size)
2264 return object_size;
2265 else
2266 return 0;
2267 }
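/*
 * Editor's illustration (not part of the driver): the offsets array
 * that binder_validate_object() is checked against starts right after
 * the pointer-aligned data section. A hypothetical walk over all
 * embedded objects, assuming @buffer was already copied from userspace:
 */
#if 0
static bool example_all_objects_valid(struct binder_buffer *buffer)
{
	binder_size_t *offp, *off_end;

	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++)
		if (binder_validate_object(buffer, *offp) == 0)
			return false;	/* bad offset or truncated object */
	return true;
}
#endif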
2270 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2271 * @b: binder_buffer containing the object
2272 * @index: index in offset array at which the binder_buffer_object is located
2274 * @start: points to the start of the offset array
2275 * @num_valid: the number of valid offsets in the offset array
2277 * Return: If @index is within the valid range of the offset array
2278 * described by @start and @num_valid, and if there's a valid
2279 * binder_buffer_object at the offset found in index @index
2280 * of the offset array, that object is returned. Otherwise,
2281 * %NULL is returned.
2282 * Note that the offset found in index @index itself is not
2283 * verified; this function assumes that @num_valid elements
2284 * from @start were previously verified to have valid offsets.
2286 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2287 binder_size_t index,
2288 binder_size_t *start,
2289 binder_size_t num_valid)
2291 struct binder_buffer_object *buffer_obj;
2292 binder_size_t *offp;
2294 if (index >= num_valid)
2295 return NULL;
2297 offp = start + index;
2298 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2299 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2300 return NULL;
2302 return buffer_obj;
2303 }
2306 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2307 * @b: transaction buffer
2308 * @objects_start: start of objects buffer
2309 * @buffer: binder_buffer_object in which to fix up
2310 * @offset: start offset in @buffer to fix up
2311 * @last_obj: last binder_buffer_object that we fixed up in
2312 * @last_min_offset: minimum fixup offset in @last_obj
2314 * Return: %true if a fixup in buffer @buffer at offset @offset is allowed; %false otherwise.
2317 * For safety reasons, we only allow fixups inside a buffer to happen
2318 * at increasing offsets; additionally, we only allow fixup on the last
2319 * buffer object that was verified, or one of its parents.
2321 * Example of what is allowed:
2324 * B (parent = A, offset = 0)
2325 * C (parent = A, offset = 16)
2326 * D (parent = C, offset = 0)
2327 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2329 * Examples of what is not allowed:
2331 * Decreasing offsets within the same parent:
2333 * C (parent = A, offset = 16)
2334 * B (parent = A, offset = 0) // decreasing offset within A
2336 * Referring to a parent that wasn't the last object or any of its parents:
2338 * B (parent = A, offset = 0)
2339 * C (parent = A, offset = 0)
2340 * C (parent = A, offset = 16)
2341 * D (parent = B, offset = 0) // B is not A or any of A's parents
2343 static bool binder_validate_fixup(struct binder_buffer *b,
2344 binder_size_t *objects_start,
2345 struct binder_buffer_object *buffer,
2346 binder_size_t fixup_offset,
2347 struct binder_buffer_object *last_obj,
2348 binder_size_t last_min_offset)
2350 if (!last_obj) {
2351 /* Nothing to fix up in */
2352 return false;
2353 }
2355 while (last_obj != buffer) {
2357 * Safe to retrieve the parent of last_obj, since it
2358 * was already previously verified by the driver.
2360 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2361 return false;
2362 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2363 last_obj = (struct binder_buffer_object *)
2364 (b->data + *(objects_start + last_obj->parent));
2365 }
2366 return (fixup_offset >= last_min_offset);
2367 }
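/*
 * Editor's illustration (not part of the driver): the allowed layout
 * "B (parent = A, offset = 0), C (parent = A, offset = 16)" from the
 * comment above, expressed with the uapi types. Indices and offsets
 * are hypothetical example values.
 */
#if 0
static void example_sg_fixup_layout(struct binder_buffer_object *objs)
{
	/* A: 32-byte root buffer holding two pointers, at 0 and 16 */
	objs[0].hdr.type = BINDER_TYPE_PTR;
	objs[0].flags = 0;
	objs[0].length = 32;
	/* B: patched into A at offset 0 */
	objs[1].hdr.type = BINDER_TYPE_PTR;
	objs[1].flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	objs[1].parent = 0;		/* offsets-array index of A */
	objs[1].parent_offset = 0;
	/* C: patched into A at offset 16; must come after B */
	objs[2].hdr.type = BINDER_TYPE_PTR;
	objs[2].flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	objs[2].parent = 0;
	objs[2].parent_offset = 16;
}
#endif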
2369 static void binder_transaction_buffer_release(struct binder_proc *proc,
2370 struct binder_buffer *buffer,
2371 binder_size_t *failed_at)
2373 binder_size_t *offp, *off_start, *off_end;
2374 int debug_id = buffer->debug_id;
2376 binder_debug(BINDER_DEBUG_TRANSACTION,
2377 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2378 proc->pid, buffer->debug_id,
2379 buffer->data_size, buffer->offsets_size, failed_at);
2381 if (buffer->target_node)
2382 binder_dec_node(buffer->target_node, 1, 0);
2384 off_start = (binder_size_t *)(buffer->data +
2385 ALIGN(buffer->data_size, sizeof(void *)));
2386 if (failed_at)
2387 off_end = failed_at;
2388 else
2389 off_end = (void *)off_start + buffer->offsets_size;
2390 for (offp = off_start; offp < off_end; offp++) {
2391 struct binder_object_header *hdr;
2392 size_t object_size = binder_validate_object(buffer, *offp);
2394 if (object_size == 0) {
2395 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2396 debug_id, (u64)*offp, buffer->data_size);
2397 continue;
2398 }
2399 hdr = (struct binder_object_header *)(buffer->data + *offp);
2400 switch (hdr->type) {
2401 case BINDER_TYPE_BINDER:
2402 case BINDER_TYPE_WEAK_BINDER: {
2403 struct flat_binder_object *fp;
2404 struct binder_node *node;
2406 fp = to_flat_binder_object(hdr);
2407 node = binder_get_node(proc, fp->binder);
2408 if (node == NULL) {
2409 pr_err("transaction release %d bad node %016llx\n",
2410 debug_id, (u64)fp->binder);
2411 break;
2412 }
2413 binder_debug(BINDER_DEBUG_TRANSACTION,
2414 " node %d u%016llx\n",
2415 node->debug_id, (u64)node->ptr);
2416 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2418 binder_put_node(node);
2420 case BINDER_TYPE_HANDLE:
2421 case BINDER_TYPE_WEAK_HANDLE: {
2422 struct flat_binder_object *fp;
2423 struct binder_ref_data rdata;
2426 fp = to_flat_binder_object(hdr);
2427 ret = binder_dec_ref_for_handle(proc, fp->handle,
2428 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2431 pr_err("transaction release %d bad handle %d, ret = %d\n",
2432 debug_id, fp->handle, ret);
2435 binder_debug(BINDER_DEBUG_TRANSACTION,
2436 " ref %d desc %d\n",
2437 rdata.debug_id, rdata.desc);
2440 case BINDER_TYPE_FD: {
2441 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2443 binder_debug(BINDER_DEBUG_TRANSACTION,
2444 " fd %d\n", fp->fd);
2446 task_close_fd(proc, fp->fd);
2448 case BINDER_TYPE_PTR:
2449 /*
2450 * Nothing to do here, this will get cleaned up when the
2451 * transaction buffer gets freed
2452 */
2453 break;
2454 case BINDER_TYPE_FDA: {
2455 struct binder_fd_array_object *fda;
2456 struct binder_buffer_object *parent;
2457 uintptr_t parent_buffer;
2458 u32 *fd_array;
2459 size_t fd_index;
2460 binder_size_t fd_buf_size;
2462 fda = to_binder_fd_array_object(hdr);
2463 parent = binder_validate_ptr(buffer, fda->parent,
2467 pr_err("transaction release %d bad parent offset",
2471 /*
2472 * Since the parent was already fixed up, convert it
2473 * back to kernel address space to access it
2474 */
2475 parent_buffer = parent->buffer -
2476 binder_alloc_get_user_buffer_offset(
2477 &proc->alloc);
2479 fd_buf_size = sizeof(u32) * fda->num_fds;
2480 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2481 pr_err("transaction release %d invalid number of fds (%lld)\n",
2482 debug_id, (u64)fda->num_fds);
2485 if (fd_buf_size > parent->length ||
2486 fda->parent_offset > parent->length - fd_buf_size) {
2487 /* No space for all file descriptors here. */
2488 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2489 debug_id, (u64)fda->num_fds);
2492 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2493 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2494 task_close_fd(proc, fd_array[fd_index]);
2497 pr_err("transaction release %d bad object type %x\n",
2498 debug_id, hdr->type);
2504 static int binder_translate_binder(struct flat_binder_object *fp,
2505 struct binder_transaction *t,
2506 struct binder_thread *thread)
2508 struct binder_node *node;
2509 struct binder_proc *proc = thread->proc;
2510 struct binder_proc *target_proc = t->to_proc;
2511 struct binder_ref_data rdata;
2512 int ret;
2514 node = binder_get_node(proc, fp->binder);
2515 if (!node) {
2516 node = binder_new_node(proc, fp);
2517 if (!node)
2518 return -ENOMEM;
2519 }
2520 if (fp->cookie != node->cookie) {
2521 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2522 proc->pid, thread->pid, (u64)fp->binder,
2523 node->debug_id, (u64)fp->cookie,
2524 (u64)node->cookie);
2525 ret = -EINVAL;
2526 goto done;
2527 }
2528 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2529 ret = -EPERM;
2530 goto done;
2531 }
2533 ret = binder_inc_ref_for_node(target_proc, node,
2534 fp->hdr.type == BINDER_TYPE_BINDER,
2535 &thread->todo, &rdata);
2539 if (fp->hdr.type == BINDER_TYPE_BINDER)
2540 fp->hdr.type = BINDER_TYPE_HANDLE;
2541 else
2542 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2544 fp->handle = rdata.desc;
2547 trace_binder_transaction_node_to_ref(t, node, &rdata);
2548 binder_debug(BINDER_DEBUG_TRANSACTION,
2549 " node %d u%016llx -> ref %d desc %d\n",
2550 node->debug_id, (u64)node->ptr,
2551 rdata.debug_id, rdata.desc);
2552 done:
2553 binder_put_node(node);
2554 return ret;
2555 }
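/*
 * Editor's illustration (not part of the driver): the sender-side
 * object that the code above consumes. A process passing one of its
 * own objects embeds a BINDER_TYPE_BINDER; the driver rewrites it to
 * a BINDER_TYPE_HANDLE before delivery. Values are hypothetical.
 */
#if 0
static void example_fill_binder_object(struct flat_binder_object *fp,
				       binder_uintptr_t local_ptr,
				       binder_uintptr_t local_cookie)
{
	fp->hdr.type = BINDER_TYPE_BINDER;	/* becomes BINDER_TYPE_HANDLE remotely */
	fp->flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
	fp->binder = local_ptr;		/* node lookup key in the sender */
	fp->cookie = local_cookie;	/* must match the node's cookie */
}
#endif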
2557 static int binder_translate_handle(struct flat_binder_object *fp,
2558 struct binder_transaction *t,
2559 struct binder_thread *thread)
2561 struct binder_proc *proc = thread->proc;
2562 struct binder_proc *target_proc = t->to_proc;
2563 struct binder_node *node;
2564 struct binder_ref_data src_rdata;
2565 int ret = 0;
2567 node = binder_get_node_from_ref(proc, fp->handle,
2568 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2569 if (!node) {
2570 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2571 proc->pid, thread->pid, fp->handle);
2572 return -EINVAL;
2573 }
2574 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2575 ret = -EPERM;
2576 goto done;
2577 }
2579 binder_node_lock(node);
2580 if (node->proc == target_proc) {
2581 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2582 fp->hdr.type = BINDER_TYPE_BINDER;
2583 else
2584 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2585 fp->binder = node->ptr;
2586 fp->cookie = node->cookie;
2588 binder_inner_proc_lock(node->proc);
2589 binder_inc_node_nilocked(node,
2590 fp->hdr.type == BINDER_TYPE_BINDER,
2593 binder_inner_proc_unlock(node->proc);
2594 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2595 binder_debug(BINDER_DEBUG_TRANSACTION,
2596 " ref %d desc %d -> node %d u%016llx\n",
2597 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2599 binder_node_unlock(node);
2601 struct binder_ref_data dest_rdata;
2603 binder_node_unlock(node);
2604 ret = binder_inc_ref_for_node(target_proc, node,
2605 fp->hdr.type == BINDER_TYPE_HANDLE,
2606 NULL, &dest_rdata);
2607 if (ret)
2608 goto done;
2610 fp->binder = 0;
2611 fp->handle = dest_rdata.desc;
2612 fp->cookie = 0;
2613 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2614 &dest_rdata);
2615 binder_debug(BINDER_DEBUG_TRANSACTION,
2616 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2617 src_rdata.debug_id, src_rdata.desc,
2618 dest_rdata.debug_id, dest_rdata.desc,
2619 node->debug_id);
2620 }
2621 done:
2622 binder_put_node(node);
2623 return ret;
2624 }
2626 static int binder_translate_fd(int fd,
2627 struct binder_transaction *t,
2628 struct binder_thread *thread,
2629 struct binder_transaction *in_reply_to)
2631 struct binder_proc *proc = thread->proc;
2632 struct binder_proc *target_proc = t->to_proc;
2633 int target_fd;
2634 struct file *file;
2635 int ret;
2636 bool target_allows_fd;
2638 if (in_reply_to)
2639 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2640 else
2641 target_allows_fd = t->buffer->target_node->accept_fds;
2642 if (!target_allows_fd) {
2643 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2644 proc->pid, thread->pid,
2645 in_reply_to ? "reply" : "transaction",
2648 goto err_fd_not_accepted;
2653 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2654 proc->pid, thread->pid, fd);
2658 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2664 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2665 if (target_fd < 0) {
2667 goto err_get_unused_fd;
2669 task_fd_install(target_proc, target_fd, file);
2670 trace_binder_transaction_fd(t, fd, target_fd);
2671 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2672 fd, target_fd);
2674 return target_fd;
2676 err_get_unused_fd:
2677 err_security:
2678 fput(file);
2679 err_fget:
2680 err_fd_not_accepted:
2681 return ret;
2682 }
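/*
 * Editor's illustration (not part of the driver): the sender-side view
 * of the fd case above. Userspace embeds a binder_fd_object holding one
 * of its own descriptors; the driver installs a duplicate in the target
 * and patches fp->fd to the target's descriptor number.
 */
#if 0
static void example_fill_fd_object(struct binder_fd_object *fp, int fd)
{
	fp->hdr.type = BINDER_TYPE_FD;
	fp->pad_flags = 0;
	fp->fd = fd;		/* meaningful in the sender only */
	fp->cookie = 0;		/* unused for fd objects */
}
#endif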
2684 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2685 struct binder_buffer_object *parent,
2686 struct binder_transaction *t,
2687 struct binder_thread *thread,
2688 struct binder_transaction *in_reply_to)
2690 binder_size_t fdi, fd_buf_size, num_installed_fds;
2691 int target_fd;
2692 uintptr_t parent_buffer;
2693 u32 *fd_array;
2694 struct binder_proc *proc = thread->proc;
2695 struct binder_proc *target_proc = t->to_proc;
2697 fd_buf_size = sizeof(u32) * fda->num_fds;
2698 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2699 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2700 proc->pid, thread->pid, (u64)fda->num_fds);
2703 if (fd_buf_size > parent->length ||
2704 fda->parent_offset > parent->length - fd_buf_size) {
2705 /* No space for all file descriptors here. */
2706 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2707 proc->pid, thread->pid, (u64)fda->num_fds);
2710 /*
2711 * Since the parent was already fixed up, convert it
2712 * back to the kernel address space to access it
2713 */
2714 parent_buffer = parent->buffer -
2715 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2716 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2717 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2718 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2719 proc->pid, thread->pid);
2722 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2723 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2724 in_reply_to);
2725 if (target_fd < 0)
2726 goto err_translate_fd_failed;
2727 fd_array[fdi] = target_fd;
2728 }
2729 return 0;
2731 err_translate_fd_failed:
2732 /*
2733 * Failed to allocate fd or security error, free fds
2734 * installed so far.
2735 */
2736 num_installed_fds = fdi;
2737 for (fdi = 0; fdi < num_installed_fds; fdi++)
2738 task_close_fd(target_proc, fd_array[fdi]);
2739 return target_fd;
2740 }
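/*
 * Editor's illustration (not part of the driver): a fd array as the
 * sender lays it out. The u32 descriptors themselves live inside the
 * parent buffer object at fda->parent_offset; the loop above rewrites
 * num_fds of them in place. Values are hypothetical.
 */
#if 0
static void example_fill_fd_array(struct binder_fd_array_object *fda,
				  binder_size_t parent_index)
{
	fda->hdr.type = BINDER_TYPE_FDA;
	fda->pad = 0;
	fda->num_fds = 2;		/* two u32 fds stored in the parent */
	fda->parent = parent_index;	/* offsets-array index of the parent */
	fda->parent_offset = 0;		/* fds start at the parent's base */
}
#endif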
2742 static int binder_fixup_parent(struct binder_transaction *t,
2743 struct binder_thread *thread,
2744 struct binder_buffer_object *bp,
2745 binder_size_t *off_start,
2746 binder_size_t num_valid,
2747 struct binder_buffer_object *last_fixup_obj,
2748 binder_size_t last_fixup_min_off)
2750 struct binder_buffer_object *parent;
2751 u8 *parent_buffer;
2752 struct binder_buffer *b = t->buffer;
2753 struct binder_proc *proc = thread->proc;
2754 struct binder_proc *target_proc = t->to_proc;
2756 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2759 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2761 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2762 proc->pid, thread->pid);
2766 if (!binder_validate_fixup(b, off_start,
2767 parent, bp->parent_offset,
2768 last_fixup_obj,
2769 last_fixup_min_off)) {
2770 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2771 proc->pid, thread->pid);
2775 if (parent->length < sizeof(binder_uintptr_t) ||
2776 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2777 /* No space for a pointer here! */
2778 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2779 proc->pid, thread->pid);
2782 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2783 binder_alloc_get_user_buffer_offset(
2784 &target_proc->alloc));
2785 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2787 return 0;
2788 }
2791 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2792 * @t: transaction to send
2793 * @proc: process to send the transaction to
2794 * @thread: thread in @proc to send the transaction to (may be NULL)
2796 * This function queues a transaction to the specified process. It will try
2797 * to find a thread in the target process to handle the transaction and
2798 * wake it up. If no thread is found, the work is queued to the proc waitqueue.
2801 * If the @thread parameter is not NULL, the transaction is always queued
2802 * to the waitlist of that specific thread.
2804 * Return: true if the transaction was successfully queued
2805 * false if the target process or thread is dead
2807 static bool binder_proc_transaction(struct binder_transaction *t,
2808 struct binder_proc *proc,
2809 struct binder_thread *thread)
2811 struct binder_node *node = t->buffer->target_node;
2812 struct binder_priority node_prio;
2813 bool oneway = !!(t->flags & TF_ONE_WAY);
2814 bool pending_async = false;
2817 binder_node_lock(node);
2818 node_prio.prio = node->min_priority;
2819 node_prio.sched_policy = node->sched_policy;
2823 if (node->has_async_transaction) {
2824 pending_async = true;
2825 } else {
2826 node->has_async_transaction = true;
2827 }
2830 binder_inner_proc_lock(proc);
2832 if (proc->is_dead || (thread && thread->is_dead)) {
2833 binder_inner_proc_unlock(proc);
2834 binder_node_unlock(node);
2835 return false;
2836 }
2838 if (!thread && !pending_async)
2839 thread = binder_select_thread_ilocked(proc);
2841 if (thread) {
2842 binder_transaction_priority(thread->task, t, node_prio,
2843 node->inherit_rt);
2844 binder_enqueue_thread_work_ilocked(thread, &t->work);
2845 } else if (!pending_async) {
2846 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2847 } else {
2848 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2849 }
2851 if (!pending_async)
2852 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2854 binder_inner_proc_unlock(proc);
2855 binder_node_unlock(node);
2857 return true;
2858 }
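/*
 * Editor's note (illustration, not part of the driver): the dispatch
 * above condenses to three cases. A hypothetical summary, assuming the
 * node and inner proc locks are held as in the function body:
 */
#if 0
	if (thread)			/* explicit target (e.g. reply path) */
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)	/* any looper thread may pick it up */
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else				/* async txn already in flight: park it */
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
#endif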
2861 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2862 * @node: struct binder_node for which to get refs
2863 * @proc: returns @node->proc if valid
2864 * @error: if no @proc then returns BR_DEAD_REPLY
2866 * User-space normally keeps the node alive when creating a transaction
2867 * since it has a reference to the target. The local strong ref keeps it
2868 * alive if the sending process dies before the target process processes
2869 * the transaction. If the source process is malicious or has a reference
2870 * counting bug, relying on the local strong ref can fail.
2872 * Since user-space can cause the local strong ref to go away, we also take
2873 * a tmpref on the node to ensure it survives while we are constructing
2874 * the transaction. We also need a tmpref on the proc while we are
2875 * constructing the transaction, so we take that here as well.
2877 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
2878 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2879 * target proc has died, @error is set to BR_DEAD_REPLY
2881 static struct binder_node *binder_get_node_refs_for_txn(
2882 struct binder_node *node,
2883 struct binder_proc **procp,
2886 struct binder_node *target_node = NULL;
2888 binder_node_inner_lock(node);
2889 if (node->proc) {
2890 target_node = node;
2891 binder_inc_node_nilocked(node, 1, 0, NULL);
2892 binder_inc_node_tmpref_ilocked(node);
2893 atomic_inc(&node->proc->tmp_ref);
2894 *procp = node->proc;
2895 } else
2896 *error = BR_DEAD_REPLY;
2897 binder_node_inner_unlock(node);
2899 return target_node;
2900 }
2902 static void binder_transaction(struct binder_proc *proc,
2903 struct binder_thread *thread,
2904 struct binder_transaction_data *tr, int reply,
2905 binder_size_t extra_buffers_size)
2907 int ret;
2908 struct binder_transaction *t;
2909 struct binder_work *tcomplete;
2910 binder_size_t *offp, *off_end, *off_start;
2911 binder_size_t off_min;
2912 u8 *sg_bufp, *sg_buf_end;
2913 struct binder_proc *target_proc = NULL;
2914 struct binder_thread *target_thread = NULL;
2915 struct binder_node *target_node = NULL;
2916 struct binder_transaction *in_reply_to = NULL;
2917 struct binder_transaction_log_entry *e;
2918 uint32_t return_error = 0;
2919 uint32_t return_error_param = 0;
2920 uint32_t return_error_line = 0;
2921 struct binder_buffer_object *last_fixup_obj = NULL;
2922 binder_size_t last_fixup_min_off = 0;
2923 struct binder_context *context = proc->context;
2924 int t_debug_id = atomic_inc_return(&binder_last_id);
2925 char *secctx = NULL;
2926 u32 secctx_sz = 0;
2928 e = binder_transaction_log_add(&binder_transaction_log);
2929 e->debug_id = t_debug_id;
2930 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2931 e->from_proc = proc->pid;
2932 e->from_thread = thread->pid;
2933 e->target_handle = tr->target.handle;
2934 e->data_size = tr->data_size;
2935 e->offsets_size = tr->offsets_size;
2936 e->context_name = proc->context->name;
2939 binder_inner_proc_lock(proc);
2940 in_reply_to = thread->transaction_stack;
2941 if (in_reply_to == NULL) {
2942 binder_inner_proc_unlock(proc);
2943 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2944 proc->pid, thread->pid);
2945 return_error = BR_FAILED_REPLY;
2946 return_error_param = -EPROTO;
2947 return_error_line = __LINE__;
2948 goto err_empty_call_stack;
2950 if (in_reply_to->to_thread != thread) {
2951 spin_lock(&in_reply_to->lock);
2952 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2953 proc->pid, thread->pid, in_reply_to->debug_id,
2954 in_reply_to->to_proc ?
2955 in_reply_to->to_proc->pid : 0,
2956 in_reply_to->to_thread ?
2957 in_reply_to->to_thread->pid : 0);
2958 spin_unlock(&in_reply_to->lock);
2959 binder_inner_proc_unlock(proc);
2960 return_error = BR_FAILED_REPLY;
2961 return_error_param = -EPROTO;
2962 return_error_line = __LINE__;
2964 goto err_bad_call_stack;
2966 thread->transaction_stack = in_reply_to->to_parent;
2967 binder_inner_proc_unlock(proc);
2968 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2969 if (target_thread == NULL) {
2970 return_error = BR_DEAD_REPLY;
2971 return_error_line = __LINE__;
2972 goto err_dead_binder;
2974 if (target_thread->transaction_stack != in_reply_to) {
2975 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2976 proc->pid, thread->pid,
2977 target_thread->transaction_stack ?
2978 target_thread->transaction_stack->debug_id : 0,
2979 in_reply_to->debug_id);
2980 binder_inner_proc_unlock(target_thread->proc);
2981 return_error = BR_FAILED_REPLY;
2982 return_error_param = -EPROTO;
2983 return_error_line = __LINE__;
2985 target_thread = NULL;
2986 goto err_dead_binder;
2988 target_proc = target_thread->proc;
2989 atomic_inc(&target_proc->tmp_ref);
2990 binder_inner_proc_unlock(target_thread->proc);
2992 if (tr->target.handle) {
2993 struct binder_ref *ref;
2995 /*
2996 * There must already be a strong ref
2997 * on this node. If so, do a strong
2998 * increment on the node to ensure it
2999 * stays alive until the transaction is
3000 * complete.
3001 */
3002 binder_proc_lock(proc);
3003 ref = binder_get_ref_olocked(proc, tr->target.handle,
3004 true);
3005 if (ref) {
3006 target_node = binder_get_node_refs_for_txn(
3007 ref->node, &target_proc,
3008 &return_error);
3009 } else {
3010 binder_user_error("%d:%d got transaction to invalid handle\n",
3011 proc->pid, thread->pid);
3012 return_error = BR_FAILED_REPLY;
3013 }
3014 binder_proc_unlock(proc);
3015 } else {
3016 mutex_lock(&context->context_mgr_node_lock);
3017 target_node = context->binder_context_mgr_node;
3018 if (target_node)
3019 target_node = binder_get_node_refs_for_txn(
3020 target_node, &target_proc,
3021 &return_error);
3022 else
3023 return_error = BR_DEAD_REPLY;
3024 mutex_unlock(&context->context_mgr_node_lock);
3025 if (target_node && target_proc == proc) {
3026 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3027 proc->pid, thread->pid);
3028 return_error = BR_FAILED_REPLY;
3029 return_error_param = -EINVAL;
3030 return_error_line = __LINE__;
3031 goto err_invalid_target_handle;
3032 }
3033 }
3034 if (!target_node) {
3035 /*
3036 * return_error is set above
3037 */
3038 return_error_param = -EINVAL;
3039 return_error_line = __LINE__;
3040 goto err_dead_binder;
3042 e->to_node = target_node->debug_id;
3043 if (security_binder_transaction(proc->tsk,
3044 target_proc->tsk) < 0) {
3045 return_error = BR_FAILED_REPLY;
3046 return_error_param = -EPERM;
3047 return_error_line = __LINE__;
3048 goto err_invalid_target_handle;
3050 binder_inner_proc_lock(proc);
3051 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3052 struct binder_transaction *tmp;
3054 tmp = thread->transaction_stack;
3055 if (tmp->to_thread != thread) {
3056 spin_lock(&tmp->lock);
3057 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3058 proc->pid, thread->pid, tmp->debug_id,
3059 tmp->to_proc ? tmp->to_proc->pid : 0,
3060 tmp->to_thread ?
3061 tmp->to_thread->pid : 0);
3062 spin_unlock(&tmp->lock);
3063 binder_inner_proc_unlock(proc);
3064 return_error = BR_FAILED_REPLY;
3065 return_error_param = -EPROTO;
3066 return_error_line = __LINE__;
3067 goto err_bad_call_stack;
3068 }
3069 while (tmp) {
3070 struct binder_thread *from;
3072 spin_lock(&tmp->lock);
3073 from = tmp->from;
3074 if (from && from->proc == target_proc) {
3075 atomic_inc(&from->tmp_ref);
3076 target_thread = from;
3077 spin_unlock(&tmp->lock);
3080 spin_unlock(&tmp->lock);
3081 tmp = tmp->from_parent;
3084 binder_inner_proc_unlock(proc);
3086 if (target_thread)
3087 e->to_thread = target_thread->pid;
3088 e->to_proc = target_proc->pid;
3090 /* TODO: reuse incoming transaction for reply */
3091 t = kzalloc(sizeof(*t), GFP_KERNEL);
3092 if (t == NULL) {
3093 return_error = BR_FAILED_REPLY;
3094 return_error_param = -ENOMEM;
3095 return_error_line = __LINE__;
3096 goto err_alloc_t_failed;
3098 binder_stats_created(BINDER_STAT_TRANSACTION);
3099 spin_lock_init(&t->lock);
3101 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3102 if (tcomplete == NULL) {
3103 return_error = BR_FAILED_REPLY;
3104 return_error_param = -ENOMEM;
3105 return_error_line = __LINE__;
3106 goto err_alloc_tcomplete_failed;
3108 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3110 t->debug_id = t_debug_id;
3112 if (reply)
3113 binder_debug(BINDER_DEBUG_TRANSACTION,
3114 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3115 proc->pid, thread->pid, t->debug_id,
3116 target_proc->pid, target_thread->pid,
3117 (u64)tr->data.ptr.buffer,
3118 (u64)tr->data.ptr.offsets,
3119 (u64)tr->data_size, (u64)tr->offsets_size,
3120 (u64)extra_buffers_size);
3121 else
3122 binder_debug(BINDER_DEBUG_TRANSACTION,
3123 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3124 proc->pid, thread->pid, t->debug_id,
3125 target_proc->pid, target_node->debug_id,
3126 (u64)tr->data.ptr.buffer,
3127 (u64)tr->data.ptr.offsets,
3128 (u64)tr->data_size, (u64)tr->offsets_size,
3129 (u64)extra_buffers_size);
3131 if (!reply && !(tr->flags & TF_ONE_WAY))
3132 t->from = thread;
3133 else
3134 t->from = NULL;
3135 t->sender_euid = task_euid(proc->tsk);
3136 t->to_proc = target_proc;
3137 t->to_thread = target_thread;
3138 t->code = tr->code;
3139 t->flags = tr->flags;
3140 if (!(t->flags & TF_ONE_WAY) &&
3141 binder_supported_policy(current->policy)) {
3142 /* Inherit supported policies for synchronous transactions */
3143 t->priority.sched_policy = current->policy;
3144 t->priority.prio = current->normal_prio;
3145 } else {
3146 /* Otherwise, fall back to the default priority */
3147 t->priority = target_proc->default_priority;
3148 }
3150 if (target_node && target_node->txn_security_ctx) {
3151 u32 secid;
3152 size_t added_size;
3154 security_task_getsecid(proc->tsk, &secid);
3155 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3157 return_error = BR_FAILED_REPLY;
3158 return_error_param = ret;
3159 return_error_line = __LINE__;
3160 goto err_get_secctx_failed;
3162 added_size = ALIGN(secctx_sz, sizeof(u64));
3163 extra_buffers_size += added_size;
3164 if (extra_buffers_size < added_size) {
3165 /* integer overflow of extra_buffers_size */
3166 return_error = BR_FAILED_REPLY;
3167 return_error_param = -EINVAL;
3168 return_error_line = __LINE__;
3169 goto err_bad_extra_size;
3173 trace_binder_transaction(reply, t, target_node);
3175 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3176 tr->offsets_size, extra_buffers_size,
3177 !reply && (t->flags & TF_ONE_WAY));
3178 if (IS_ERR(t->buffer)) {
3179 /*
3180 * -ESRCH indicates VMA cleared. The target is dying.
3181 */
3182 return_error_param = PTR_ERR(t->buffer);
3183 return_error = return_error_param == -ESRCH ?
3184 BR_DEAD_REPLY : BR_FAILED_REPLY;
3185 return_error_line = __LINE__;
3187 goto err_binder_alloc_buf_failed;
3189 if (secctx) {
3190 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3191 ALIGN(tr->offsets_size, sizeof(void *)) +
3192 ALIGN(extra_buffers_size, sizeof(void *)) -
3193 ALIGN(secctx_sz, sizeof(u64));
3194 char *kptr = t->buffer->data + buf_offset;
3196 t->security_ctx = (uintptr_t)kptr +
3197 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
3198 memcpy(kptr, secctx, secctx_sz);
3199 security_release_secctx(secctx, secctx_sz);
3200 secctx = NULL;
3201 }
3202 t->buffer->debug_id = t->debug_id;
3203 t->buffer->transaction = t;
3204 t->buffer->target_node = target_node;
3205 trace_binder_transaction_alloc_buf(t->buffer);
3206 off_start = (binder_size_t *)(t->buffer->data +
3207 ALIGN(tr->data_size, sizeof(void *)));
3208 offp = off_start;
3210 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3211 tr->data.ptr.buffer, tr->data_size)) {
3212 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3213 proc->pid, thread->pid);
3214 return_error = BR_FAILED_REPLY;
3215 return_error_param = -EFAULT;
3216 return_error_line = __LINE__;
3217 goto err_copy_data_failed;
3219 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3220 tr->data.ptr.offsets, tr->offsets_size)) {
3221 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3222 proc->pid, thread->pid);
3223 return_error = BR_FAILED_REPLY;
3224 return_error_param = -EFAULT;
3225 return_error_line = __LINE__;
3226 goto err_copy_data_failed;
3228 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3229 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3230 proc->pid, thread->pid, (u64)tr->offsets_size);
3231 return_error = BR_FAILED_REPLY;
3232 return_error_param = -EINVAL;
3233 return_error_line = __LINE__;
3234 goto err_bad_offset;
3236 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3237 binder_user_error("%d:%d got transaction with unaligned buffers size, %llu\n",
3238 proc->pid, thread->pid,
3239 (u64)extra_buffers_size);
3240 return_error = BR_FAILED_REPLY;
3241 return_error_param = -EINVAL;
3242 return_error_line = __LINE__;
3243 goto err_bad_offset;
3245 off_end = (void *)off_start + tr->offsets_size;
3246 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3247 sg_buf_end = sg_bufp + extra_buffers_size -
3248 ALIGN(secctx_sz, sizeof(u64));
3250 for (; offp < off_end; offp++) {
3251 struct binder_object_header *hdr;
3252 size_t object_size = binder_validate_object(t->buffer, *offp);
3254 if (object_size == 0 || *offp < off_min) {
3255 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3256 proc->pid, thread->pid, (u64)*offp,
3257 (u64)off_min,
3258 (u64)t->buffer->data_size);
3259 return_error = BR_FAILED_REPLY;
3260 return_error_param = -EINVAL;
3261 return_error_line = __LINE__;
3262 goto err_bad_offset;
3265 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3266 off_min = *offp + object_size;
3267 switch (hdr->type) {
3268 case BINDER_TYPE_BINDER:
3269 case BINDER_TYPE_WEAK_BINDER: {
3270 struct flat_binder_object *fp;
3272 fp = to_flat_binder_object(hdr);
3273 ret = binder_translate_binder(fp, t, thread);
3275 return_error = BR_FAILED_REPLY;
3276 return_error_param = ret;
3277 return_error_line = __LINE__;
3278 goto err_translate_failed;
3281 case BINDER_TYPE_HANDLE:
3282 case BINDER_TYPE_WEAK_HANDLE: {
3283 struct flat_binder_object *fp;
3285 fp = to_flat_binder_object(hdr);
3286 ret = binder_translate_handle(fp, t, thread);
3288 return_error = BR_FAILED_REPLY;
3289 return_error_param = ret;
3290 return_error_line = __LINE__;
3291 goto err_translate_failed;
3295 case BINDER_TYPE_FD: {
3296 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3297 int target_fd = binder_translate_fd(fp->fd, t, thread,
3298 in_reply_to);
3300 if (target_fd < 0) {
3301 return_error = BR_FAILED_REPLY;
3302 return_error_param = target_fd;
3303 return_error_line = __LINE__;
3304 goto err_translate_failed;
3309 case BINDER_TYPE_FDA: {
3310 struct binder_fd_array_object *fda =
3311 to_binder_fd_array_object(hdr);
3312 struct binder_buffer_object *parent =
3313 binder_validate_ptr(t->buffer, fda->parent,
3317 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3318 proc->pid, thread->pid);
3319 return_error = BR_FAILED_REPLY;
3320 return_error_param = -EINVAL;
3321 return_error_line = __LINE__;
3322 goto err_bad_parent;
3324 if (!binder_validate_fixup(t->buffer, off_start,
3325 parent, fda->parent_offset,
3326 last_fixup_obj,
3327 last_fixup_min_off)) {
3328 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3329 proc->pid, thread->pid);
3330 return_error = BR_FAILED_REPLY;
3331 return_error_param = -EINVAL;
3332 return_error_line = __LINE__;
3333 goto err_bad_parent;
3335 ret = binder_translate_fd_array(fda, parent, t, thread,
3336 in_reply_to);
3337 if (ret < 0) {
3338 return_error = BR_FAILED_REPLY;
3339 return_error_param = ret;
3340 return_error_line = __LINE__;
3341 goto err_translate_failed;
3343 last_fixup_obj = parent;
3344 last_fixup_min_off =
3345 fda->parent_offset + sizeof(u32) * fda->num_fds;
3347 case BINDER_TYPE_PTR: {
3348 struct binder_buffer_object *bp =
3349 to_binder_buffer_object(hdr);
3350 size_t buf_left = sg_buf_end - sg_bufp;
3352 if (bp->length > buf_left) {
3353 binder_user_error("%d:%d got transaction with too large buffer\n",
3354 proc->pid, thread->pid);
3355 return_error = BR_FAILED_REPLY;
3356 return_error_param = -EINVAL;
3357 return_error_line = __LINE__;
3358 goto err_bad_offset;
3360 if (copy_from_user(sg_bufp,
3361 (const void __user *)(uintptr_t)
3362 bp->buffer, bp->length)) {
3363 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3364 proc->pid, thread->pid);
3365 return_error_param = -EFAULT;
3366 return_error = BR_FAILED_REPLY;
3367 return_error_line = __LINE__;
3368 goto err_copy_data_failed;
3370 /* Fixup buffer pointer to target proc address space */
3371 bp->buffer = (uintptr_t)sg_bufp +
3372 binder_alloc_get_user_buffer_offset(
3373 &target_proc->alloc);
3374 sg_bufp += ALIGN(bp->length, sizeof(u64));
3376 ret = binder_fixup_parent(t, thread, bp, off_start,
3377 offp - off_start,
3378 last_fixup_obj,
3379 last_fixup_min_off);
3381 return_error = BR_FAILED_REPLY;
3382 return_error_param = ret;
3383 return_error_line = __LINE__;
3384 goto err_translate_failed;
3386 last_fixup_obj = bp;
3387 last_fixup_min_off = 0;
3390 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3391 proc->pid, thread->pid, hdr->type);
3392 return_error = BR_FAILED_REPLY;
3393 return_error_param = -EINVAL;
3394 return_error_line = __LINE__;
3395 goto err_bad_object_type;
3398 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3399 t->work.type = BINDER_WORK_TRANSACTION;
3401 if (reply) {
3402 binder_enqueue_thread_work(thread, tcomplete);
3403 binder_inner_proc_lock(target_proc);
3404 if (target_thread->is_dead) {
3405 binder_inner_proc_unlock(target_proc);
3406 goto err_dead_proc_or_thread;
3408 BUG_ON(t->buffer->async_transaction != 0);
3409 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3410 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3411 binder_inner_proc_unlock(target_proc);
3412 wake_up_interruptible_sync(&target_thread->wait);
3413 binder_restore_priority(current, in_reply_to->saved_priority);
3414 binder_free_transaction(in_reply_to);
3415 } else if (!(t->flags & TF_ONE_WAY)) {
3416 BUG_ON(t->buffer->async_transaction != 0);
3417 binder_inner_proc_lock(proc);
3418 /*
3419 * Defer the TRANSACTION_COMPLETE, so we don't return to
3420 * userspace immediately; this allows the target process to
3421 * immediately start processing this transaction, reducing
3422 * latency. We will then return the TRANSACTION_COMPLETE when
3423 * the target replies (or there is an error).
3424 */
3425 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3427 t->from_parent = thread->transaction_stack;
3428 thread->transaction_stack = t;
3429 binder_inner_proc_unlock(proc);
3430 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3431 binder_inner_proc_lock(proc);
3432 binder_pop_transaction_ilocked(thread, t);
3433 binder_inner_proc_unlock(proc);
3434 goto err_dead_proc_or_thread;
3437 BUG_ON(target_node == NULL);
3438 BUG_ON(t->buffer->async_transaction != 1);
3439 binder_enqueue_thread_work(thread, tcomplete);
3440 if (!binder_proc_transaction(t, target_proc, NULL))
3441 goto err_dead_proc_or_thread;
3442 }
3443 if (target_thread)
3444 binder_thread_dec_tmpref(target_thread);
3445 binder_proc_dec_tmpref(target_proc);
3446 if (target_node)
3447 binder_dec_node_tmpref(target_node);
3448 /*
3449 * write barrier to synchronize with initialization
3450 * of log entry
3451 */
3452 smp_wmb();
3453 WRITE_ONCE(e->debug_id_done, t_debug_id);
3454 return;
3456 err_dead_proc_or_thread:
3457 return_error = BR_DEAD_REPLY;
3458 return_error_line = __LINE__;
3459 binder_dequeue_work(proc, tcomplete);
3460 err_translate_failed:
3461 err_bad_object_type:
3462 err_bad_offset:
3463 err_bad_parent:
3464 err_copy_data_failed:
3465 trace_binder_transaction_failed_buffer_release(t->buffer);
3466 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3468 binder_dec_node_tmpref(target_node);
3470 t->buffer->transaction = NULL;
3471 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3472 err_binder_alloc_buf_failed:
3473 err_bad_extra_size:
3474 if (secctx)
3475 security_release_secctx(secctx, secctx_sz);
3476 err_get_secctx_failed:
3478 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3479 err_alloc_tcomplete_failed:
3481 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3484 err_empty_call_stack:
3485 err_dead_binder:
3486 err_invalid_target_handle:
3487 if (target_thread)
3488 binder_thread_dec_tmpref(target_thread);
3489 if (target_proc)
3490 binder_proc_dec_tmpref(target_proc);
3491 if (target_node) {
3492 binder_dec_node(target_node, 1, 0);
3493 binder_dec_node_tmpref(target_node);
3494 }
3496 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3497 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3498 proc->pid, thread->pid, return_error, return_error_param,
3499 (u64)tr->data_size, (u64)tr->offsets_size,
3503 struct binder_transaction_log_entry *fe;
3505 e->return_error = return_error;
3506 e->return_error_param = return_error_param;
3507 e->return_error_line = return_error_line;
3508 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3509 *fe = *e;
3510 /*
3511 * write barrier to synchronize with initialization
3512 * of log entry
3513 */
3514 smp_wmb();
3515 WRITE_ONCE(e->debug_id_done, t_debug_id);
3516 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3519 BUG_ON(thread->return_error.cmd != BR_OK);
3520 if (in_reply_to) {
3521 binder_restore_priority(current, in_reply_to->saved_priority);
3522 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3523 binder_enqueue_thread_work(thread, &thread->return_error.work);
3524 binder_send_failed_reply(in_reply_to, return_error);
3525 } else {
3526 thread->return_error.cmd = return_error;
3527 binder_enqueue_thread_work(thread, &thread->return_error.work);
3528 }
3529 }
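/*
 * Editor's illustration (not part of the driver): how userspace reaches
 * binder_transaction(). A u32 BC_TRANSACTION followed by a
 * binder_transaction_data is pushed through the BINDER_WRITE_READ
 * ioctl. This is a hedged userspace sketch; error handling and the read
 * half of the protocol are omitted, and the method code is a
 * hypothetical example value.
 */
#if 0
int example_send_transaction(int binder_fd, __u32 handle,
			     void *data, binder_size_t data_size)
{
	struct {
		__u32 cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) writebuf = {
		.cmd = BC_TRANSACTION,
		.tr = {
			.target.handle = handle,
			.code = 1,		/* hypothetical method code */
			.flags = 0,		/* synchronous call */
			.data_size = data_size,
			.offsets_size = 0,	/* no embedded objects */
			.data.ptr.buffer = (binder_uintptr_t)data,
		},
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(writebuf),
		.write_buffer = (binder_uintptr_t)&writebuf,
	};

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif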
3531 static int binder_thread_write(struct binder_proc *proc,
3532 struct binder_thread *thread,
3533 binder_uintptr_t binder_buffer, size_t size,
3534 binder_size_t *consumed)
3537 struct binder_context *context = proc->context;
3538 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3539 void __user *ptr = buffer + *consumed;
3540 void __user *end = buffer + size;
3542 while (ptr < end && thread->return_error.cmd == BR_OK) {
3543 uint32_t cmd;
3545 if (get_user(cmd, (uint32_t __user *)ptr))
3546 return -EFAULT;
3547 ptr += sizeof(uint32_t);
3548 trace_binder_command(cmd);
3549 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3550 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3551 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3552 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3553 }
3554 switch (cmd) {
3555 case BC_INCREFS:
3556 case BC_ACQUIRE:
3557 case BC_RELEASE:
3558 case BC_DECREFS: {
3559 uint32_t target;
3560 const char *debug_string;
3561 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3562 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3563 struct binder_ref_data rdata;
3565 if (get_user(target, (uint32_t __user *)ptr))
3568 ptr += sizeof(uint32_t);
3570 if (increment && !target) {
3571 struct binder_node *ctx_mgr_node;
3572 mutex_lock(&context->context_mgr_node_lock);
3573 ctx_mgr_node = context->binder_context_mgr_node;
3575 ret = binder_inc_ref_for_node(
3577 strong, NULL, &rdata);
3578 mutex_unlock(&context->context_mgr_node_lock);
3581 ret = binder_update_ref_for_handle(
3582 proc, target, increment, strong,
3584 if (!ret && rdata.desc != target) {
3585 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3586 proc->pid, thread->pid,
3587 target, rdata.desc);
3591 debug_string = "IncRefs";
3594 debug_string = "Acquire";
3597 debug_string = "Release";
3601 debug_string = "DecRefs";
3605 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3606 proc->pid, thread->pid, debug_string,
3607 strong, target, ret);
3610 binder_debug(BINDER_DEBUG_USER_REFS,
3611 "%d:%d %s ref %d desc %d s %d w %d\n",
3612 proc->pid, thread->pid, debug_string,
3613 rdata.debug_id, rdata.desc, rdata.strong,
3617 case BC_INCREFS_DONE:
3618 case BC_ACQUIRE_DONE: {
3619 binder_uintptr_t node_ptr;
3620 binder_uintptr_t cookie;
3621 struct binder_node *node;
3622 bool free_node;
3624 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3626 ptr += sizeof(binder_uintptr_t);
3627 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3629 ptr += sizeof(binder_uintptr_t);
3630 node = binder_get_node(proc, node_ptr);
3632 binder_user_error("%d:%d %s u%016llx no match\n",
3633 proc->pid, thread->pid,
3634 cmd == BC_INCREFS_DONE ?
3640 if (cookie != node->cookie) {
3641 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3642 proc->pid, thread->pid,
3643 cmd == BC_INCREFS_DONE ?
3644 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3645 (u64)node_ptr, node->debug_id,
3646 (u64)cookie, (u64)node->cookie);
3647 binder_put_node(node);
3650 binder_node_inner_lock(node);
3651 if (cmd == BC_ACQUIRE_DONE) {
3652 if (node->pending_strong_ref == 0) {
3653 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3654 proc->pid, thread->pid,
3656 binder_node_inner_unlock(node);
3657 binder_put_node(node);
3660 node->pending_strong_ref = 0;
3661 } else {
3662 if (node->pending_weak_ref == 0) {
3663 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3664 proc->pid, thread->pid,
3666 binder_node_inner_unlock(node);
3667 binder_put_node(node);
3670 node->pending_weak_ref = 0;
3672 free_node = binder_dec_node_nilocked(node,
3673 cmd == BC_ACQUIRE_DONE, 0);
3675 binder_debug(BINDER_DEBUG_USER_REFS,
3676 "%d:%d %s node %d ls %d lw %d tr %d\n",
3677 proc->pid, thread->pid,
3678 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3679 node->debug_id, node->local_strong_refs,
3680 node->local_weak_refs, node->tmp_refs);
3681 binder_node_inner_unlock(node);
3682 binder_put_node(node);
3685 case BC_ATTEMPT_ACQUIRE:
3686 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3688 case BC_ACQUIRE_RESULT:
3689 pr_err("BC_ACQUIRE_RESULT not supported\n");
3692 case BC_FREE_BUFFER: {
3693 binder_uintptr_t data_ptr;
3694 struct binder_buffer *buffer;
3696 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3698 ptr += sizeof(binder_uintptr_t);
3700 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3702 if (IS_ERR_OR_NULL(buffer)) {
3703 if (PTR_ERR(buffer) == -EPERM) {
3705 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3706 proc->pid, thread->pid,
3710 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3711 proc->pid, thread->pid,
3716 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3717 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3718 proc->pid, thread->pid, (u64)data_ptr,
3720 buffer->transaction ? "active" : "finished");
3722 binder_inner_proc_lock(proc);
3723 if (buffer->transaction) {
3724 buffer->transaction->buffer = NULL;
3725 buffer->transaction = NULL;
3727 binder_inner_proc_unlock(proc);
3728 if (buffer->async_transaction && buffer->target_node) {
3729 struct binder_node *buf_node;
3730 struct binder_work *w;
3732 buf_node = buffer->target_node;
3733 binder_node_inner_lock(buf_node);
3734 BUG_ON(!buf_node->has_async_transaction);
3735 BUG_ON(buf_node->proc != proc);
3736 w = binder_dequeue_work_head_ilocked(
3737 &buf_node->async_todo);
3739 buf_node->has_async_transaction = false;
3741 binder_enqueue_work_ilocked(
3743 binder_wakeup_proc_ilocked(proc);
3745 binder_node_inner_unlock(buf_node);
3747 trace_binder_transaction_buffer_release(buffer);
3748 binder_transaction_buffer_release(proc, buffer, NULL);
3749 binder_alloc_free_buf(&proc->alloc, buffer);
3753 case BC_TRANSACTION_SG:
3755 struct binder_transaction_data_sg tr;
3757 if (copy_from_user(&tr, ptr, sizeof(tr)))
3760 binder_transaction(proc, thread, &tr.transaction_data,
3761 cmd == BC_REPLY_SG, tr.buffers_size);
3764 case BC_TRANSACTION:
3766 struct binder_transaction_data tr;
3768 if (copy_from_user(&tr, ptr, sizeof(tr)))
3771 binder_transaction(proc, thread, &tr,
3772 cmd == BC_REPLY, 0);
3776 case BC_REGISTER_LOOPER:
3777 binder_debug(BINDER_DEBUG_THREADS,
3778 "%d:%d BC_REGISTER_LOOPER\n",
3779 proc->pid, thread->pid);
3780 binder_inner_proc_lock(proc);
3781 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3782 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3783 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3784 proc->pid, thread->pid);
3785 } else if (proc->requested_threads == 0) {
3786 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3787 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3788 proc->pid, thread->pid);
3790 proc->requested_threads--;
3791 proc->requested_threads_started++;
3793 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3794 binder_inner_proc_unlock(proc);
3796 case BC_ENTER_LOOPER:
3797 binder_debug(BINDER_DEBUG_THREADS,
3798 "%d:%d BC_ENTER_LOOPER\n",
3799 proc->pid, thread->pid);
3800 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3801 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3802 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3803 proc->pid, thread->pid);
3805 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3807 case BC_EXIT_LOOPER:
3808 binder_debug(BINDER_DEBUG_THREADS,
3809 "%d:%d BC_EXIT_LOOPER\n",
3810 proc->pid, thread->pid);
3811 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3814 case BC_REQUEST_DEATH_NOTIFICATION:
3815 case BC_CLEAR_DEATH_NOTIFICATION: {
3816 uint32_t target;
3817 binder_uintptr_t cookie;
3818 struct binder_ref *ref;
3819 struct binder_ref_death *death = NULL;
3821 if (get_user(target, (uint32_t __user *)ptr))
3823 ptr += sizeof(uint32_t);
3824 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3826 ptr += sizeof(binder_uintptr_t);
3827 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3828 /*
3829 * Allocate memory for death notification
3830 * before taking lock
3831 */
3832 death = kzalloc(sizeof(*death), GFP_KERNEL);
3833 if (death == NULL) {
3834 WARN_ON(thread->return_error.cmd !=
3836 thread->return_error.cmd = BR_ERROR;
3837 binder_enqueue_thread_work(
3839 &thread->return_error.work);
3841 BINDER_DEBUG_FAILED_TRANSACTION,
3842 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3843 proc->pid, thread->pid);
3847 binder_proc_lock(proc);
3848 ref = binder_get_ref_olocked(proc, target, false);
3850 binder_user_error("%d:%d %s invalid ref %d\n",
3851 proc->pid, thread->pid,
3852 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3853 "BC_REQUEST_DEATH_NOTIFICATION" :
3854 "BC_CLEAR_DEATH_NOTIFICATION",
3856 binder_proc_unlock(proc);
3861 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3862 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3863 proc->pid, thread->pid,
3864 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3865 "BC_REQUEST_DEATH_NOTIFICATION" :
3866 "BC_CLEAR_DEATH_NOTIFICATION",
3867 (u64)cookie, ref->data.debug_id,
3868 ref->data.desc, ref->data.strong,
3869 ref->data.weak, ref->node->debug_id);
3871 binder_node_lock(ref->node);
3872 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3874 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3875 proc->pid, thread->pid);
3876 binder_node_unlock(ref->node);
3877 binder_proc_unlock(proc);
3881 binder_stats_created(BINDER_STAT_DEATH);
3882 INIT_LIST_HEAD(&death->work.entry);
3883 death->cookie = cookie;
3884 ref->death = death;
3885 if (ref->node->proc == NULL) {
3886 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3888 binder_inner_proc_lock(proc);
3889 binder_enqueue_work_ilocked(
3890 &ref->death->work, &proc->todo);
3891 binder_wakeup_proc_ilocked(proc);
3892 binder_inner_proc_unlock(proc);
3893 }
3894 } else {
3895 if (ref->death == NULL) {
3896 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3897 proc->pid, thread->pid);
3898 binder_node_unlock(ref->node);
3899 binder_proc_unlock(proc);
3900 break;
3901 }
3902 death = ref->death;
3903 if (death->cookie != cookie) {
3904 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3905 proc->pid, thread->pid,
3908 binder_node_unlock(ref->node);
3909 binder_proc_unlock(proc);
3910 break;
3911 }
3912 ref->death = NULL;
3913 binder_inner_proc_lock(proc);
3914 if (list_empty(&death->work.entry)) {
3915 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3916 if (thread->looper &
3917 (BINDER_LOOPER_STATE_REGISTERED |
3918 BINDER_LOOPER_STATE_ENTERED))
3919 binder_enqueue_thread_work_ilocked(
3923 binder_enqueue_work_ilocked(
3926 binder_wakeup_proc_ilocked(
3930 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3931 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3933 binder_inner_proc_unlock(proc);
3935 binder_node_unlock(ref->node);
3936 binder_proc_unlock(proc);
3938 case BC_DEAD_BINDER_DONE: {
3939 struct binder_work *w;
3940 binder_uintptr_t cookie;
3941 struct binder_ref_death *death = NULL;
3943 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3946 ptr += sizeof(cookie);
3947 binder_inner_proc_lock(proc);
3948 list_for_each_entry(w, &proc->delivered_death,
3949 entry) {
3950 struct binder_ref_death *tmp_death =
3951 container_of(w,
3952 struct binder_ref_death,
3953 work);
3955 if (tmp_death->cookie == cookie) {
3956 death = tmp_death;
3957 break;
3958 }
3959 }
3960 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3961 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3962 proc->pid, thread->pid, (u64)cookie,
3964 if (death == NULL) {
3965 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3966 proc->pid, thread->pid, (u64)cookie);
3967 binder_inner_proc_unlock(proc);
3970 binder_dequeue_work_ilocked(&death->work);
3971 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3972 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3973 if (thread->looper &
3974 (BINDER_LOOPER_STATE_REGISTERED |
3975 BINDER_LOOPER_STATE_ENTERED))
3976 binder_enqueue_thread_work_ilocked(
3977 thread, &death->work);
3979 binder_enqueue_work_ilocked(
3982 binder_wakeup_proc_ilocked(proc);
3985 binder_inner_proc_unlock(proc);
3989 pr_err("%d:%d unknown command %d\n",
3990 proc->pid, thread->pid, cmd);
3993 *consumed = ptr - buffer;
3994 }
3995 return 0;
3996 }
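/*
 * Editor's illustration (not part of the driver): the write buffer
 * parsed above is a packed stream of u32 commands, each followed by its
 * payload; on return, *consumed tells userspace how much of the stream
 * was drained. A hypothetical two-command stream:
 */
#if 0
struct example_write_stream {
	__u32 enter_looper;		/* BC_ENTER_LOOPER: no payload */
	__u32 free_buffer;		/* BC_FREE_BUFFER: one pointer payload */
	binder_uintptr_t buffer_addr;	/* userspace address being returned */
} __attribute__((packed));
#endif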
3998 static void binder_stat_br(struct binder_proc *proc,
3999 struct binder_thread *thread, uint32_t cmd)
4001 trace_binder_return(cmd);
4002 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4003 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4004 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4005 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4009 static int binder_put_node_cmd(struct binder_proc *proc,
4010 struct binder_thread *thread,
4012 binder_uintptr_t node_ptr,
4013 binder_uintptr_t node_cookie,
4015 uint32_t cmd, const char *cmd_name)
4017 void __user *ptr = *ptrp;
4019 if (put_user(cmd, (uint32_t __user *)ptr))
4021 ptr += sizeof(uint32_t);
4023 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4025 ptr += sizeof(binder_uintptr_t);
4027 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4029 ptr += sizeof(binder_uintptr_t);
4031 binder_stat_br(proc, thread, cmd);
4032 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4033 proc->pid, thread->pid, cmd_name, node_debug_id,
4034 (u64)node_ptr, (u64)node_cookie);
4036 *ptrp = ptr;
4037 return 0;
4038 }
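/*
 * Editor's illustration (not part of the driver): each command emitted
 * by binder_put_node_cmd() appears in the read buffer as a u32 BR_ code
 * followed by the node's ptr and cookie. A hypothetical reader-side
 * view of one record:
 */
#if 0
struct example_node_record {
	__u32 cmd;			/* BR_INCREFS, BR_ACQUIRE, BR_RELEASE or BR_DECREFS */
	binder_uintptr_t ptr;		/* node->ptr as userspace published it */
	binder_uintptr_t cookie;	/* node->cookie, for userspace sanity checks */
} __attribute__((packed));
#endif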
4040 static int binder_wait_for_work(struct binder_thread *thread,
4041 bool do_proc_work)
4043 DEFINE_WAIT(wait);
4044 struct binder_proc *proc = thread->proc;
4045 int ret = 0;
4047 freezer_do_not_count();
4048 binder_inner_proc_lock(proc);
4050 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4051 if (binder_has_work_ilocked(thread, do_proc_work))
4054 list_add(&thread->waiting_thread_node,
4055 &proc->waiting_threads);
4056 binder_inner_proc_unlock(proc);
4057 schedule();
4058 binder_inner_proc_lock(proc);
4059 list_del_init(&thread->waiting_thread_node);
4060 if (signal_pending(current)) {
4065 finish_wait(&thread->wait, &wait);
4066 binder_inner_proc_unlock(proc);
4067 freezer_count();
4069 return ret;
4070 }
4072 static int binder_thread_read(struct binder_proc *proc,
4073 struct binder_thread *thread,
4074 binder_uintptr_t binder_buffer, size_t size,
4075 binder_size_t *consumed, int non_block)
4077 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4078 void __user *ptr = buffer + *consumed;
4079 void __user *end = buffer + size;
4081 int ret = 0;
4082 int wait_for_proc_work;
4084 if (*consumed == 0) {
4085 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4086 return -EFAULT;
4087 ptr += sizeof(uint32_t);
4088 }
4090 retry:
4091 binder_inner_proc_lock(proc);
4092 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4093 binder_inner_proc_unlock(proc);
4095 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4097 trace_binder_wait_for_work(wait_for_proc_work,
4098 !!thread->transaction_stack,
4099 !binder_worklist_empty(proc, &thread->todo));
4100 if (wait_for_proc_work) {
4101 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4102 BINDER_LOOPER_STATE_ENTERED))) {
4103 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4104 proc->pid, thread->pid, thread->looper);
4105 wait_event_interruptible(binder_user_error_wait,
4106 binder_stop_on_user_error < 2);
4108 binder_restore_priority(current, proc->default_priority);
4111 if (non_block) {
4112 if (!binder_has_work(thread, wait_for_proc_work))
4113 ret = -EAGAIN;
4114 } else {
4115 ret = binder_wait_for_work(thread, wait_for_proc_work);
4116 }
4118 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4120 if (ret)
4121 return ret;
4123 while (1) {
4124 uint32_t cmd;
4125 struct binder_transaction_data_secctx tr;
4126 struct binder_transaction_data *trd = &tr.transaction_data;
4127 struct binder_work *w = NULL;
4128 struct list_head *list = NULL;
4129 struct binder_transaction *t = NULL;
4130 struct binder_thread *t_from;
4131 size_t trsize = sizeof(*trd);
4133 binder_inner_proc_lock(proc);
4134 if (!binder_worklist_empty_ilocked(&thread->todo))
4135 list = &thread->todo;
4136 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4137 wait_for_proc_work)
4138 list = &proc->todo;
4139 else {
4140 binder_inner_proc_unlock(proc);
4142 /* no data added */
4143 if (ptr - buffer == 4 && !thread->looper_need_return)
4144 goto retry;
4145 break;
4146 }
4148 if (end - ptr < sizeof(tr) + 4) {
4149 binder_inner_proc_unlock(proc);
4152 w = binder_dequeue_work_head_ilocked(list);
4153 if (binder_worklist_empty_ilocked(&thread->todo))
4154 thread->process_todo = false;
4156 switch (w->type) {
4157 case BINDER_WORK_TRANSACTION: {
4158 binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
4162 struct binder_error *e = container_of(
4163 w, struct binder_error, work);
4165 WARN_ON(e->cmd == BR_OK);
4166 binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
4176 binder_inner_proc_unlock(proc);
4177 cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
4182 binder_stat_br(proc, thread, cmd);
4183 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4184 "%d:%d BR_TRANSACTION_COMPLETE\n",
4185 proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_strong_ref;
			int has_weak_ref;
			void __user *orig_ptr = ptr;
4199 BUG_ON(proc != node->proc);
4200 strong = node->internal_strong_refs ||
4201 node->local_strong_refs;
4202 weak = !hlist_empty(&node->refs) ||
4203 node->local_weak_refs ||
4204 node->tmp_refs || strong;
4205 has_strong_ref = node->has_strong_ref;
4206 has_weak_ref = node->has_weak_ref;
4208 if (weak && !has_weak_ref) {
4209 node->has_weak_ref = 1;
4210 node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
4214 node->has_strong_ref = 1;
4215 node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
4219 node->has_strong_ref = 0;
4220 if (!weak && has_weak_ref)
4221 node->has_weak_ref = 0;
4222 if (!weak && !strong) {
4223 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4224 "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
4230 binder_inner_proc_unlock(proc);
4231 binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);
4246 if (weak && !has_weak_ref)
4247 ret = binder_put_node_cmd(
4248 proc, thread, &ptr, node_ptr,
4249 node_cookie, node_debug_id,
4250 BR_INCREFS, "BR_INCREFS");
4251 if (!ret && strong && !has_strong_ref)
4252 ret = binder_put_node_cmd(
4253 proc, thread, &ptr, node_ptr,
4254 node_cookie, node_debug_id,
4255 BR_ACQUIRE, "BR_ACQUIRE");
4256 if (!ret && !strong && has_strong_ref)
4257 ret = binder_put_node_cmd(
4258 proc, thread, &ptr, node_ptr,
4259 node_cookie, node_debug_id,
4260 BR_RELEASE, "BR_RELEASE");
4261 if (!ret && !weak && has_weak_ref)
4262 ret = binder_put_node_cmd(
4263 proc, thread, &ptr, node_ptr,
4264 node_cookie, node_debug_id,
4265 BR_DECREFS, "BR_DECREFS");
4266 if (orig_ptr == ptr)
4267 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4268 "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;
4322 BUG_ON(t->buffer == NULL);
4323 if (t->buffer->target_node) {
4324 struct binder_node *target_node = t->buffer->target_node;
4325 struct binder_priority node_prio;
4327 trd->target.ptr = target_node->ptr;
4328 trd->cookie = target_node->cookie;
4329 node_prio.sched_policy = target_node->sched_policy;
4330 node_prio.prio = target_node->min_priority;
4331 binder_transaction_priority(current, t, node_prio,
4332 target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}
4354 trd->data_size = t->buffer->data_size;
4355 trd->offsets_size = t->buffer->offsets_size;
4356 trd->data.ptr.buffer = (binder_uintptr_t)
4357 ((uintptr_t)t->buffer->data +
4358 binder_alloc_get_user_buffer_offset(&proc->alloc));
4359 trd->data.ptr.offsets = trd->data.ptr.buffer +
				ALIGN(t->buffer->data_size,
				      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;
4389 trace_binder_transaction_received(t);
4390 binder_stat_br(proc, thread, cmd);
4391 binder_debug(BINDER_DEBUG_TRANSACTION,
4392 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4393 proc->pid, thread->pid,
4394 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4395 (cmd == BR_TRANSACTION_SEC_CTX) ?
4396 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4397 t->debug_id, t_from ? t_from->proc->pid : 0,
4398 t_from ? t_from->pid : 0, cmd,
4399 t->buffer->data_size, t->buffer->offsets_size,
4400 (u64)trd->data.ptr.buffer,
4401 (u64)trd->data.ptr.offsets);
		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:
4420 *consumed = ptr - buffer;
4421 binder_inner_proc_lock(proc);
4422 if (proc->requested_threads == 0 &&
4423 list_empty(&thread->proc->waiting_threads) &&
4424 proc->requested_threads_started < proc->max_threads &&
4425 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* user space fails to spawn a new thread if this check is omitted */) {
4428 proc->requested_threads++;
4429 binder_inner_proc_unlock(proc);
4430 binder_debug(BINDER_DEBUG_THREADS,
4431 "%d:%d BR_SPAWN_LOOPER\n",
4432 proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
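/*
 * binder_release_work() - flush all pending work from @list during
 * teardown: transactions are failed back to their sender, everything
 * else is freed, with a debug log for each undelivered item.
 */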
4441 static void binder_release_work(struct binder_proc *proc,
4442 struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
4453 struct binder_transaction *t;
4455 t = container_of(w, struct binder_transaction, work);
			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
4461 struct binder_error *e = container_of(
4462 w, struct binder_error, work);
4464 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
4469 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4475 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4476 struct binder_ref_death *death;
4478 death = container_of(w, struct binder_ref_death, work);
4479 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4480 "undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
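/*
 * binder_get_thread_ilocked() - find the binder_thread for the current
 * task in proc->threads (an rbtree keyed by pid), or, when @new_thread
 * is a zeroed allocation, initialize and insert it on a miss. Must be
 * called with proc->inner_lock held.
 */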
4494 static struct binder_thread *binder_get_thread_ilocked(
4495 struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
4498 struct rb_node *parent = NULL;
4499 struct rb_node **p = &proc->threads.rb_node;
	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
4515 binder_stats_created(BINDER_STAT_THREAD);
4516 thread->proc = proc;
4517 thread->pid = current->pid;
4518 get_task_struct(current);
4519 thread->task = current;
4520 atomic_set(&thread->tmp_ref, 0);
4521 init_waitqueue_head(&thread->wait);
4522 INIT_LIST_HEAD(&thread->todo);
4523 rb_link_node(&thread->rb_node, parent, p);
4524 rb_insert_color(&thread->rb_node, &proc->threads);
4525 thread->looper_need_return = true;
4526 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4527 thread->return_error.cmd = BR_OK;
4528 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4529 thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
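/*
 * binder_get_thread() - lookup wrapper that avoids allocating under the
 * inner lock: search first; only on a miss drop the lock, kzalloc a new
 * binder_thread, and retry the lookup-or-insert, freeing the allocation
 * if another thread raced in ahead of us.
 */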
4534 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
4537 struct binder_thread *new_thread;
4539 binder_inner_proc_lock(proc);
4540 thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
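/*
 * binder_free_proc() - actually free a binder_proc once its last
 * temporary reference is dropped; by now all work lists must be empty,
 * the allocator is torn down and the task reference released.
 */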
4555 static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
4558 BUG_ON(!list_empty(&proc->delivered_death));
4559 binder_alloc_deferred_release(&proc->alloc);
4560 put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
4565 static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
4568 binder_stats_deleted(BINDER_STAT_THREAD);
4569 binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}
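/*
 * binder_thread_release() - detach an exiting thread from its proc.
 * Unhooks the thread from every transaction on its stack, remembers any
 * reply still owed to a sender so a BR_DEAD_REPLY can be generated,
 * wakes pollers with POLLFREE, and releases the thread's pending work.
 */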
4574 static int binder_thread_release(struct binder_proc *proc,
4575 struct binder_thread *thread)
{
	struct binder_transaction *t;
4578 struct binder_transaction *send_reply = NULL;
4579 int active_transactions = 0;
4580 struct binder_transaction *last_t = NULL;
4582 binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	atomic_inc(&proc->tmp_ref);
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
4595 rb_erase(&thread->rb_node, &proc->threads);
4596 t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
4607 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4608 "release %d:%d transaction %d %s, still active\n",
4609 proc->pid, thread->pid,
4611 (t->to_thread == thread) ? "in" : "out");
		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
4637 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4638 waitqueue_active(&thread->wait)) {
4639 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4642 binder_inner_proc_unlock(thread->proc);
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4647 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4648 * lock, so we can be sure it's done after calling synchronize_rcu().
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4655 binder_release_work(proc, &thread->todo);
4656 binder_thread_dec_tmpref(thread);
	return active_transactions;
}
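/*
 * binder_poll() - poll/epoll support; reports the fd readable when the
 * calling thread (or, for an idle looper, its process) has work queued.
 */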
4660 static unsigned int binder_poll(struct file *filp,
4661 struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
4664 struct binder_thread *thread = NULL;
4665 bool wait_for_proc_work;
	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;
4671 binder_inner_proc_lock(thread->proc);
4672 thread->looper |= BINDER_LOOPER_STATE_POLL;
4673 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4675 binder_inner_proc_unlock(thread->proc);
4677 poll_wait(filp, &thread->wait, wait);
	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
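/*
 * binder_ioctl_write_read() - handler for BINDER_WRITE_READ, the main
 * data path: consume BC_* commands from the caller's write buffer, then
 * fill its read buffer with BR_* commands, copying the consumed counts
 * back so user space can resume partially processed buffers. A rough
 * sketch of the user-space side (illustrative only, not part of this
 * driver):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_cmds,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */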
4685 static int binder_ioctl_write_read(struct file *filp,
4686 unsigned int cmd, unsigned long arg,
4687 struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
4691 unsigned int size = _IOC_SIZE(cmd);
4692 void __user *ubuf = (void __user *)arg;
4693 struct binder_write_read bwr;
	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
4703 binder_debug(BINDER_DEBUG_READ_WRITE,
4704 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4705 proc->pid, thread->pid,
4706 (u64)bwr.write_size, (u64)bwr.write_buffer,
4707 (u64)bwr.read_size, (u64)bwr.read_buffer);
	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
4722 if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
4727 trace_binder_read_done(ret);
4728 binder_inner_proc_lock(proc);
4729 if (!binder_worklist_empty_ilocked(&proc->todo))
4730 binder_wakeup_proc_ilocked(proc);
4731 binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
4738 binder_debug(BINDER_DEBUG_READ_WRITE,
4739 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4740 proc->pid, thread->pid,
4741 (u64)bwr.write_consumed, (u64)bwr.write_size,
4742 (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
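/*
 * binder_ioctl_set_ctx_mgr() - install the caller as context manager
 * for this binder device. Only one context manager may exist per
 * context; the request is checked with the LSM hook and, once a
 * manager uid is known, against that uid.
 */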
4751 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4752 struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
4756 struct binder_context *context = proc->context;
4757 struct binder_node *new_node;
4758 kuid_t curr_euid = current_euid();
4760 mutex_lock(&context->context_mgr_node_lock);
4761 if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
4769 if (uid_valid(context->binder_context_mgr_uid)) {
4770 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4771 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4772 from_kuid(&init_user_ns, curr_euid),
4773 from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
4786 binder_node_lock(new_node);
4787 new_node->local_weak_refs++;
4788 new_node->local_strong_refs++;
4789 new_node->has_strong_ref = 1;
4790 new_node->has_weak_ref = 1;
4791 context->binder_context_mgr_node = new_node;
4792 binder_node_unlock(new_node);
4793 binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
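/*
 * binder_ioctl_get_node_info_for_ref() - context-manager-only query
 * resolving @info->handle to a node and reporting its current strong
 * and weak reference counts; all other fields must be zero on entry.
 */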
4799 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4800 struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
4803 struct binder_context *context = proc->context;
4804 __u32 handle = info->handle;
4806 if (info->strong_count || info->weak_count || info->reserved1 ||
4807 info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

4813 /* This ioctl may only be used by the context manager */
4814 mutex_lock(&context->context_mgr_node_lock);
4815 if (!context->binder_context_mgr_node ||
4816 context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;
4826 info->strong_count = node->local_strong_refs +
4827 node->internal_strong_refs;
4828 info->weak_count = node->local_weak_refs;
	binder_put_node(node);

	return 0;
}
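/*
 * binder_ioctl_get_node_debug_info() - iterator-style debug query:
 * returns data for the first node whose user pointer is strictly
 * greater than @info->ptr, so repeated calls walk all of a process's
 * nodes in address order.
 */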
4835 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;
4840 memset(info, 0, sizeof(*info));
4842 binder_inner_proc_lock(proc);
4843 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
4847 info->ptr = node->ptr;
4848 info->cookie = node->cookie;
4849 info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
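/*
 * binder_ioctl() - top-level ioctl dispatcher: guarantees a
 * binder_thread exists for the caller, then routes BINDER_WRITE_READ,
 * thread/context-manager management and the node info queries to the
 * helpers above.
 */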
4859 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
4863 struct binder_thread *thread;
4864 unsigned int size = _IOC_SIZE(cmd);
4865 void __user *ubuf = (void __user *)arg;
4867 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4868 proc->pid, current->pid, cmd, arg);*/
4870 binder_selftest_alloc(&proc->alloc);
4872 trace_binder_ioctl(cmd, arg);
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
4904 struct flat_binder_object fbo;
		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
4927 struct binder_version __user *ver = ubuf;
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
4941 struct binder_node_info_for_ref info;
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
4960 struct binder_node_debug_info info;
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
4985 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4986 if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
4993 static void binder_vma_open(struct vm_area_struct *vma)
4995 struct binder_proc *proc = vma->vm_private_data;
4997 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4998 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4999 proc->pid, vma->vm_start, vma->vm_end,
5000 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5001 (unsigned long)pgprot_val(vma->vm_page_prot));
5004 static void binder_vma_close(struct vm_area_struct *vma)
5006 struct binder_proc *proc = vma->vm_private_data;
5008 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5009 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5010 proc->pid, vma->vm_start, vma->vm_end,
5011 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5012 (unsigned long)pgprot_val(vma->vm_page_prot));
5013 binder_alloc_vma_close(&proc->alloc);
5014 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
5017 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5019 return VM_FAULT_SIGBUS;
5022 static const struct vm_operations_struct binder_vm_ops = {
5023 .open = binder_vma_open,
5024 .close = binder_vma_close,
	.fault = binder_vm_fault,
};
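/*
 * binder_mmap() - establish the buffer mapping used to deliver
 * transaction data to this process. The area is capped at 4MB, must not
 * be writable from user space, is never copied on fork, and is handed
 * to binder_alloc for page management.
 */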
5028 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
5032 const char *failure_string;
	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;
5040 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5041 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5042 __func__, proc->pid, vma->vm_start, vma->vm_end,
5043 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5044 (unsigned long)pgprot_val(vma->vm_page_prot));
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
5051 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5052 vma->vm_flags &= ~VM_MAYWRITE;
5054 vma->vm_ops = &binder_vm_ops;
5055 vma->vm_private_data = proc;
	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
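/*
 * binder_open() - allocate and register a binder_proc for the opening
 * process, capture its scheduling policy and priority as the default
 * for incoming transactions, and create its debugfs proc entry.
 */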
5071 static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
5074 struct binder_device *binder_dev;
5076 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5077 current->group_leader->pid, current->pid);
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
5082 spin_lock_init(&proc->inner_lock);
5083 spin_lock_init(&proc->outer_lock);
5084 atomic_set(&proc->tmp_ref, 0);
5085 get_task_struct(current->group_leader);
5086 proc->tsk = current->group_leader;
5087 mutex_init(&proc->files_lock);
5088 INIT_LIST_HEAD(&proc->todo);
5089 if (binder_supported_policy(current->policy)) {
5090 proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
5099 proc->context = &binder_dev->context;
5100 binder_alloc_init(&proc->alloc);
5102 binder_stats_created(BINDER_STAT_PROC);
5103 proc->pid = current->group_leader->pid;
5104 INIT_LIST_HEAD(&proc->delivered_death);
5105 INIT_LIST_HEAD(&proc->waiting_threads);
5106 filp->private_data = proc;
5108 mutex_lock(&binder_procs_lock);
5109 hlist_add_head(&proc->proc_node, &binder_procs);
5110 mutex_unlock(&binder_procs_lock);
	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5124 binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
5132 static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;
	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
5147 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5148 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5150 thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
5163 static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;
5167 debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
5173 static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;
5179 binder_release_work(proc, &node->async_todo);
5181 binder_node_lock(node);
5182 binder_inner_proc_lock(proc);
5183 binder_dequeue_work_ilocked(&node->work);
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
5197 node->local_strong_refs = 0;
5198 node->local_weak_refs = 0;
5199 binder_inner_proc_unlock(proc);
5201 spin_lock(&binder_dead_nodes_lock);
5202 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5203 spin_unlock(&binder_dead_nodes_lock);
	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
5208 * Need the node lock to synchronize
5209 * with new notification requests and the
5210 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;
5221 BUG_ON(!list_empty(&ref->death->work.entry));
5222 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}
5229 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5230 "node %d now dead, refs %d, death %d\n",
5231 node->debug_id, refs, death);
5232 binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
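/*
 * binder_deferred_release() - final teardown of a binder_proc, run from
 * the deferred workqueue once the file is released: unpublish the proc,
 * release all of its threads, nodes and refs (queueing death
 * notifications for remote references), and drop the last proc ref.
 */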
5238 static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5244 BUG_ON(proc->files);
5246 mutex_lock(&binder_procs_lock);
5247 hlist_del(&proc->proc_node);
5248 mutex_unlock(&binder_procs_lock);
5250 mutex_lock(&context->context_mgr_node_lock);
5251 if (context->binder_context_mgr_node &&
5252 context->binder_context_mgr_node->proc == proc) {
5253 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5254 "%s: %d context_mgr_node gone\n",
5255 __func__, proc->pid);
5256 context->binder_context_mgr_node = NULL;
5258 mutex_unlock(&context->context_mgr_node_lock);
5259 binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
5264 atomic_inc(&proc->tmp_ref);
5266 proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
5269 while ((n = rb_first(&proc->threads))) {
5270 struct binder_thread *thread;
5272 thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
5282 struct binder_node *node;
		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
5291 binder_inc_node_tmpref_ilocked(node);
5292 rb_erase(&node->rb_node, &proc->nodes);
5293 binder_inner_proc_unlock(proc);
5294 incoming_refs = binder_node_release(node, incoming_refs);
5295 binder_inner_proc_lock(proc);
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
5306 binder_cleanup_ref_olocked(ref);
5307 binder_proc_unlock(proc);
5308 binder_free_ref(ref);
5309 binder_proc_lock(proc);
5311 binder_proc_unlock(proc);
5313 binder_release_work(proc, &proc->todo);
5314 binder_release_work(proc, &proc->delivered_death);
5316 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5317 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5318 __func__, proc->pid, threads, nodes, incoming_refs,
5319 outgoing_refs, active_transactions);
5321 binder_proc_dec_tmpref(proc);
5324 static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
5333 if (!hlist_empty(&binder_deferred_list)) {
5334 proc = hlist_entry(binder_deferred_list.first,
5335 struct binder_proc, deferred_work_node);
5336 hlist_del_init(&proc->deferred_work_node);
5337 defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);
		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}
5354 if (defer & BINDER_DEFERRED_FLUSH)
5355 binder_deferred_flush(proc);
5357 if (defer & BINDER_DEFERRED_RELEASE)
5358 binder_deferred_release(proc); /* frees proc */
		if (files)
			put_files_struct(files);
	} while (proc);
}
5364 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
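/*
 * binder_defer_work() - record deferred work of type @defer for @proc
 * and queue it on the global deferred list, to be drained by
 * binder_deferred_func() on the binder workqueue.
 */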
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
5369 mutex_lock(&binder_deferred_lock);
5370 proc->deferred_work |= defer;
5371 if (hlist_unhashed(&proc->deferred_work_node)) {
5372 hlist_add_head(&proc->deferred_work_node,
5373 &binder_deferred_list);
5374 queue_work(binder_deferred_workqueue, &binder_deferred_work);
	mutex_unlock(&binder_deferred_lock);
}
5379 static void print_binder_transaction_ilocked(struct seq_file *m,
5380 struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
5384 struct binder_proc *to_proc;
5385 struct binder_buffer *buffer = t->buffer;
5387 spin_lock(&t->lock);
5388 to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5391 prefix, t->debug_id, t,
5392 t->from ? t->from->proc->pid : 0,
5393 t->from ? t->from->pid : 0,
5394 to_proc ? to_proc->pid : 0,
5395 t->to_thread ? t->to_thread->pid : 0,
5396 t->code, t->flags, t->priority.sched_policy,
5397 t->priority.prio, t->need_reply);
5398 spin_unlock(&t->lock);
	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}
5409 if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
5413 if (buffer->target_node)
5414 seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
5420 static void print_binder_work_ilocked(struct seq_file *m,
5421 struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
5426 struct binder_node *node;
5427 struct binder_transaction *t;
	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
5466 static void print_binder_thread_ilocked(struct seq_file *m,
5467 struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5476 thread->pid, thread->looper,
5477 thread->looper_need_return,
5478 atomic_read(&thread->tmp_ref));
5479 header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					" outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					" incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					" bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, " ",
					  " pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
5504 static void print_binder_node_nilocked(struct seq_file *m,
5505 struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;
5515 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5516 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5517 node->sched_policy, node->min_priority,
5518 node->has_strong_ref, node->has_weak_ref,
5519 node->local_strong_refs, node->local_weak_refs,
5520 node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, " ",
						  " pending async transaction", w);
	}
}
5534 static void print_binder_ref_olocked(struct seq_file *m,
5535 struct binder_ref *ref)
{
	binder_node_lock(ref->node);
5538 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5539 ref->data.debug_id, ref->data.desc,
5540 ref->node->proc ? "" : "dead ",
5541 ref->node->debug_id, ref->data.strong,
5542 ref->data.weak, ref->death);
5543 binder_node_unlock(ref->node);
5546 static void print_binder_proc(struct seq_file *m,
5547 struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;
5555 seq_printf(m, "proc %d\n", proc->pid);
5556 seq_printf(m, "context %s\n", proc->context->name);
5557 header_pos = m->count;
5559 binder_inner_proc_lock(proc);
5560 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5561 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5562 rb_node), print_all);
5564 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5565 struct binder_node *node = rb_entry(n, struct binder_node,
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
5575 binder_inc_node_tmpref_ilocked(node);
5576 /* Need to drop inner lock to take node lock */
5577 binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
5580 binder_node_inner_lock(node);
5581 print_binder_node_nilocked(m, node);
5582 binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);
	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
5600 binder_alloc_print_allocated(m, &proc->alloc);
5601 binder_inner_proc_lock(proc);
5602 list_for_each_entry(w, &proc->todo, entry)
5603 print_binder_work_ilocked(m, proc, " ",
5604 " pending transaction", w);
5605 list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, " has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
5610 if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
5614 static const char * const binder_return_strings[] = {
5619 "BR_ACQUIRE_RESULT",
5621 "BR_TRANSACTION_COMPLETE",
5626 "BR_ATTEMPT_ACQUIRE",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
5635 static const char * const binder_command_strings[] = {
5638 "BC_ACQUIRE_RESULT",
5646 "BC_ATTEMPT_ACQUIRE",
5647 "BC_REGISTER_LOOPER",
5650 "BC_REQUEST_DEATH_NOTIFICATION",
5651 "BC_CLEAR_DEATH_NOTIFICATION",
5652 "BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};
5657 static const char * const binder_objstat_strings[] = {
	"transaction_complete"
};
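/*
 * print_binder_stats() - emit the BC_/BR_ command counters and object
 * creation/deletion statistics, using the string tables above for
 * names; entries with a zero count are skipped.
 */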
5667 static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5673 ARRAY_SIZE(binder_command_strings));
5674 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5675 int temp = atomic_read(&stats->bc[i]);
		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}
5682 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5683 ARRAY_SIZE(binder_return_strings));
5684 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5685 int temp = atomic_read(&stats->br[i]);
		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}
5692 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5693 ARRAY_SIZE(binder_objstat_strings));
5694 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5695 ARRAY_SIZE(stats->obj_deleted));
5696 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5697 int created = atomic_read(&stats->obj_created[i]);
5698 int deleted = atomic_read(&stats->obj_deleted[i]);
5700 if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
5709 static void print_binder_proc_stats(struct seq_file *m,
5710 struct binder_proc *proc)
{
	struct binder_work *w;
5713 struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
5716 size_t free_async_space =
5717 binder_alloc_get_free_async_space(&proc->alloc);
5719 seq_printf(m, "proc %d\n", proc->pid);
5720 seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;
5730 seq_printf(m, " threads: %d\n", count);
5731 seq_printf(m, " requested threads: %d+%d/%d\n"
5732 " ready threads %d\n"
5733 " free async space %zd\n", proc->requested_threads,
		   proc->requested_threads_started, proc->max_threads,
		   ready_threads,
		   free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
5740 binder_inner_proc_unlock(proc);
	seq_printf(m, " nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
5746 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
5750 strong += ref->data.strong;
5751 weak += ref->data.weak;
5753 binder_proc_unlock(proc);
5754 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5756 count = binder_alloc_get_allocated_count(&proc->alloc);
5757 seq_printf(m, " buffers: %d\n", count);
	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
5763 list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
5768 seq_printf(m, " pending transactions: %d\n", count);
	print_binder_stats(m, " ", &proc->stats);
}
5774 static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
5777 struct binder_node *node;
5778 struct binder_node *last_node = NULL;
5780 seq_puts(m, "binder state:\n");
5782 spin_lock(&binder_dead_nodes_lock);
5783 if (!hlist_empty(&binder_dead_nodes))
5784 seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
5795 binder_node_lock(node);
5796 print_binder_node_nilocked(m, node);
5797 binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);
5805 mutex_lock(&binder_procs_lock);
5806 hlist_for_each_entry(proc, &binder_procs, proc_node)
5807 print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
5813 static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
5817 seq_puts(m, "binder stats:\n");
5819 print_binder_stats(m, "", &binder_stats);
5821 mutex_lock(&binder_procs_lock);
5822 hlist_for_each_entry(proc, &binder_procs, proc_node)
5823 print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
5829 static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
5833 seq_puts(m, "binder transactions:\n");
5834 mutex_lock(&binder_procs_lock);
5835 hlist_for_each_entry(proc, &binder_procs, proc_node)
5836 print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
5842 static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
5845 int pid = (unsigned long)m->private;
5847 mutex_lock(&binder_procs_lock);
5848 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5849 if (itr->pid == pid) {
5850 seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
5859 static void print_binder_transaction_log_entry(struct seq_file *m,
					       struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
5864 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
5869 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5870 e->debug_id, (e->call_type == 2) ? "reply" :
5871 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5872 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5873 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5874 e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
5881 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
		   "\n" : " (incomplete)\n");
}
5885 static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;
5893 count = log_cur + 1;
5894 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5895 0 : count % ARRAY_SIZE(log->entry);
5896 if (count > ARRAY_SIZE(log->entry) || log->full)
5897 count = ARRAY_SIZE(log->entry);
5898 for (i = 0; i < count; i++) {
5899 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
5906 static const struct file_operations binder_fops = {
5907 .owner = THIS_MODULE,
5908 .poll = binder_poll,
5909 .unlocked_ioctl = binder_ioctl,
5910 .compat_ioctl = binder_ioctl,
5911 .mmap = binder_mmap,
5912 .open = binder_open,
5913 .flush = binder_flush,
	.release = binder_release,
};
5917 BINDER_DEBUG_ENTRY(state);
5918 BINDER_DEBUG_ENTRY(stats);
5919 BINDER_DEBUG_ENTRY(transactions);
5920 BINDER_DEBUG_ENTRY(transaction_log);
5922 static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;
5931 binder_device->miscdev.fops = &binder_fops;
5932 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5933 binder_device->miscdev.name = name;
5935 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5936 binder_device->context.name = name;
5937 mutex_init(&binder_device->context.context_mgr_node_lock);
	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
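/*
 * binder_init() - module initialization: set up the allocator shrinker,
 * the deferred-work workqueue and the debugfs tree, then register one
 * misc device for each name in the binder_devices_param list.
 */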
5950 static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
5954 struct binder_device *device;
5955 struct hlist_node *tmp;
	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;
5961 atomic_set(&binder_transaction_log.cur, ~0U);
5962 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5963 binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;
5967 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5968 if (binder_debugfs_dir_entry_root)
5969 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5970 binder_debugfs_dir_entry_root);
	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
6004 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
6009 strcpy(device_names, binder_devices_param);
6011 device_tmp = device_names;
6012 while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;
6020 err_init_binder_device_failed:
6021 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6022 misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);
6029 err_alloc_device_names_failed:
6030 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	destroy_workqueue(binder_deferred_workqueue);

	return ret;
}
6037 device_initcall(binder_init);
6039 #define CREATE_TRACE_POINTS
6040 #include "binder_trace.h"
6042 MODULE_LICENSE("GPL v2");