/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/atomic.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static HLIST_HEAD(binder_devices);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
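/*
 * Illustrative note (not part of the original driver logic):
 * BINDER_DEBUG_ENTRY(proc) expands to binder_proc_open() plus
 * binder_proc_fops, a seq_file wrapper around binder_proc_show().
 * Init code elsewhere in the driver can then publish it with, e.g.:
 *
 *	debugfs_create_file("proc", S_IRUGO,
 *			    binder_debugfs_dir_entry_root,
 *			    NULL, &binder_proc_fops);
 */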
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
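/*
 * Illustrative note: with the default mask only user errors and
 * failed/dead transactions are reported, so a call like
 *
 *	binder_debug(BINDER_DEBUG_TRANSACTION, "t %d sent\n", id);
 *
 * is a no-op at runtime unless the mask is widened, e.g. via the
 * module parameter (typically /sys/module/binder/parameters/debug_mask,
 * assuming the usual module name).
 */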
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};
struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_REPLY_SG) + 1];
};
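/*
 * Illustrative note: the bc[]/br[] arrays are indexed by the ioctl
 * command number, e.g. stats.bc[_IOC_NR(BC_TRANSACTION)]++ counts one
 * BC_TRANSACTION command. BC_REPLY_SG and BR_FAILED_REPLY are the
 * highest-numbered commands in each direction, which is why they size
 * the arrays.
 */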
/* These are still global, since it's not always easy to get the context */
struct binder_obj_stats {
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_obj_stats binder_obj_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_obj_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_obj_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};

struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;

	struct mutex binder_main_lock;
	struct mutex binder_deferred_lock;
	struct mutex binder_mmap_lock;

	struct hlist_head binder_procs;
	struct hlist_head binder_dead_nodes;
	struct hlist_head binder_deferred_list;

	struct work_struct deferred_work;
	struct workqueue_struct *binder_deferred_workqueue;
	struct binder_transaction_log transaction_log;
	struct binder_transaction_log transaction_log_failed;

	struct binder_stats binder_stats;
};
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	uint8_t data[0];
};
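/*
 * Illustrative note: a binder_buffer header is carved out of the
 * mmap'ed region itself, with the payload following immediately in
 * data[]. The same physical pages are visible at buffer->data in the
 * kernel and at buffer->data + proc->user_buffer_offset in the target
 * process, so a single fixed ptrdiff_t is enough to translate between
 * the two views.
 */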
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_context *context;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
};
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	preempt_enable_no_resched();
	ret = __alloc_fd(files, 0, rlim_cur, flags);
	preempt_disable();

	return ret;
}
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files) {
		preempt_enable_no_resched();
		__fd_install(proc->files, fd, file);
		preempt_disable();
	}
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static inline void binder_lock(struct binder_context *context, const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&context->binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(struct binder_context *context,
				 const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&context->binder_main_lock);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		     proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  binder_uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
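/*
 * Worked example (illustrative numbers only): if proc->buffer sits at
 * kernel address 0xffff0000 and is mmap'ed at 0x40000000 in userspace,
 * user_buffer_offset is 0x40000000 - 0xffff0000. A user data pointer is
 * converted back by subtracting that offset and then backing up by
 * offsetof(struct binder_buffer, data), exactly as the kern_ptr
 * computation above does, before the rb-tree confirms the buffer is a
 * real allocated entry.
 */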
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
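/*
 * Illustrative note: binder_update_page_range() is the single point
 * where buffer pages come and go. With allocate == 1 it backs
 * [start, end) with fresh zeroed pages and maps each page twice, once
 * into kernel vmalloc space and once into the target's vma; with
 * allocate == 0 it walks backwards over the range tearing both
 * mappings down again. Callers pass page-aligned addresses inside
 * proc->buffer.
 */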
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size,
					      size_t extra_buffers_size,
					      int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
				  proc->pid, extra_buffers_size);
		return NULL;
	}
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			     proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		     proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		     proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
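/*
 * Worked example (illustrative): on a 64-bit kernel, a transaction
 * carrying data_size = 20, offsets_size = 12 and no extra buffers
 * needs ALIGN(20, 8) + ALIGN(12, 8) = 24 + 16 = 40 bytes of payload,
 * plus sizeof(struct binder_buffer) for the split-off header when the
 * chosen best-fit free chunk is larger than the request.
 */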
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			     proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				     proc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		     proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
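/*
 * Illustrative note: freeing mirrors allocation. The pages fully
 * covered by the payload are released, the header is merged with a
 * free neighbour on either side when one exists, and the (possibly
 * enlarged) chunk is reinserted into the size-ordered free_buffers
 * tree for reuse.
 */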
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
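/*
 * Illustrative summary of the refcounting scheme above: a binder_ref
 * holds at most one strong and one weak reference on its node, taken
 * on the 0 -> 1 transition of ref->strong / ref->weak and dropped on
 * 1 -> 0; once both counts hit zero the ref deletes itself, which in
 * turn may let binder_dec_node() reap the node.
 */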
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id,
					     target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				proc->user_buffer_offset;

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
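/*
 * Example flow (illustrative numbers): if the sender passes fd 7 and
 * the receiver's lowest free descriptor is 42, binder_translate_fd()
 * takes a reference on fd 7 with fget(), reserves descriptor 42 in the
 * target's file table, and installs the same struct file there, so the
 * object arrives as fd 42 on the other side.
 */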
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer - target_proc->user_buffer_offset;
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			       target_proc->user_buffer_offset);
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
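/*
 * Illustrative example: for a scatter-gather parent buffer A that
 * embeds a pointer to child B at offset 16, binder_fixup_parent()
 * rewrites the slot at A+16 (accessed through the kernel view,
 * parent->buffer - user_buffer_offset) so that it holds B's address as
 * seen by the *target* process, since bp->buffer was already rebased
 * to the target's address space by the BINDER_TYPE_PTR handling.
 */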
1847 static void binder_transaction(struct binder_proc *proc,
1848 struct binder_thread *thread,
1849 struct binder_transaction_data *tr, int reply,
1850 binder_size_t extra_buffers_size)
1853 struct binder_transaction *t;
1854 struct binder_work *tcomplete;
1855 binder_size_t *offp, *off_end, *off_start;
1856 binder_size_t off_min;
1857 u8 *sg_bufp, *sg_buf_end;
1858 struct binder_proc *target_proc;
1859 struct binder_thread *target_thread = NULL;
1860 struct binder_node *target_node = NULL;
1861 struct list_head *target_list;
1862 wait_queue_head_t *target_wait;
1863 struct binder_transaction *in_reply_to = NULL;
1864 struct binder_transaction_log_entry *e;
1865 uint32_t return_error;
1866 struct binder_buffer_object *last_fixup_obj = NULL;
1867 binder_size_t last_fixup_min_off = 0;
1868 struct binder_context *context = proc->context;
1870 e = binder_transaction_log_add(&context->transaction_log);
1871 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1872 e->from_proc = proc->pid;
1873 e->from_thread = thread->pid;
1874 e->target_handle = tr->target.handle;
1875 e->data_size = tr->data_size;
1876 e->offsets_size = tr->offsets_size;
1877 e->context_name = proc->context->name;
1880 in_reply_to = thread->transaction_stack;
1881 if (in_reply_to == NULL) {
1882 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1883 proc->pid, thread->pid);
1884 return_error = BR_FAILED_REPLY;
1885 goto err_empty_call_stack;
1887 binder_set_nice(in_reply_to->saved_priority);
1888 if (in_reply_to->to_thread != thread) {
1889 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1890 proc->pid, thread->pid, in_reply_to->debug_id,
1891 in_reply_to->to_proc ?
1892 in_reply_to->to_proc->pid : 0,
1893 in_reply_to->to_thread ?
1894 in_reply_to->to_thread->pid : 0);
1895 return_error = BR_FAILED_REPLY;
1897 goto err_bad_call_stack;
1899 thread->transaction_stack = in_reply_to->to_parent;
1900 target_thread = in_reply_to->from;
1901 if (target_thread == NULL) {
1902 return_error = BR_DEAD_REPLY;
1903 goto err_dead_binder;
1905 if (target_thread->transaction_stack != in_reply_to) {
1906 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1907 proc->pid, thread->pid,
1908 target_thread->transaction_stack ?
1909 target_thread->transaction_stack->debug_id : 0,
1910 in_reply_to->debug_id);
1911 return_error = BR_FAILED_REPLY;
1913 target_thread = NULL;
1914 goto err_dead_binder;
1916 target_proc = target_thread->proc;
1918 if (tr->target.handle) {
1919 struct binder_ref *ref;
1921 ref = binder_get_ref(proc, tr->target.handle, true);
1923 binder_user_error("%d:%d got transaction to invalid handle\n",
1924 proc->pid, thread->pid);
1925 return_error = BR_FAILED_REPLY;
1926 goto err_invalid_target_handle;
1928 target_node = ref->node;
1930 target_node = context->binder_context_mgr_node;
1931 if (target_node == NULL) {
1932 return_error = BR_DEAD_REPLY;
1933 goto err_no_context_mgr_node;
1936 e->to_node = target_node->debug_id;
1937 target_proc = target_node->proc;
1938 if (target_proc == NULL) {
1939 return_error = BR_DEAD_REPLY;
1940 goto err_dead_binder;
1942 if (security_binder_transaction(proc->tsk,
1943 target_proc->tsk) < 0) {
1944 return_error = BR_FAILED_REPLY;
1945 goto err_invalid_target_handle;
1947 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1948 struct binder_transaction *tmp;
1950 tmp = thread->transaction_stack;
1951 if (tmp->to_thread != thread) {
1952 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1953 proc->pid, thread->pid, tmp->debug_id,
1954 tmp->to_proc ? tmp->to_proc->pid : 0,
1956 tmp->to_thread->pid : 0);
1957 return_error = BR_FAILED_REPLY;
1958 goto err_bad_call_stack;
1961 if (tmp->from && tmp->from->proc == target_proc)
1962 target_thread = tmp->from;
1963 tmp = tmp->from_parent;
1967 if (target_thread) {
1968 e->to_thread = target_thread->pid;
1969 target_list = &target_thread->todo;
1970 target_wait = &target_thread->wait;
1972 target_list = &target_proc->todo;
1973 target_wait = &target_proc->wait;
1975 e->to_proc = target_proc->pid;
1977 /* TODO: reuse incoming transaction for reply */
1978 t = kzalloc(sizeof(*t), GFP_KERNEL);
1980 return_error = BR_FAILED_REPLY;
1981 goto err_alloc_t_failed;
1983 binder_stats_created(BINDER_STAT_TRANSACTION);
1985 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1986 if (tcomplete == NULL) {
1987 return_error = BR_FAILED_REPLY;
1988 goto err_alloc_tcomplete_failed;
1990 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1992 t->debug_id = atomic_inc_return(&binder_last_id);
1993 e->debug_id = t->debug_id;
1996 binder_debug(BINDER_DEBUG_TRANSACTION,
1997 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
1998 proc->pid, thread->pid, t->debug_id,
1999 target_proc->pid, target_thread->pid,
2000 (u64)tr->data.ptr.buffer,
2001 (u64)tr->data.ptr.offsets,
2002 (u64)tr->data_size, (u64)tr->offsets_size,
2003 (u64)extra_buffers_size);
2005 binder_debug(BINDER_DEBUG_TRANSACTION,
2006 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2007 proc->pid, thread->pid, t->debug_id,
2008 target_proc->pid, target_node->debug_id,
2009 (u64)tr->data.ptr.buffer,
2010 (u64)tr->data.ptr.offsets,
2011 (u64)tr->data_size, (u64)tr->offsets_size,
2012 (u64)extra_buffers_size);
2014 if (!reply && !(tr->flags & TF_ONE_WAY))
2018 t->sender_euid = task_euid(proc->tsk);
2019 t->to_proc = target_proc;
2020 t->to_thread = target_thread;
2022 t->flags = tr->flags;
2023 t->priority = task_nice(current);
2025 trace_binder_transaction(reply, t, target_node);
2027 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
2028 tr->offsets_size, extra_buffers_size,
2029 !reply && (t->flags & TF_ONE_WAY));
2030 if (t->buffer == NULL) {
2031 return_error = BR_FAILED_REPLY;
2032 goto err_binder_alloc_buf_failed;
2034 t->buffer->allow_user_free = 0;
2035 t->buffer->debug_id = t->debug_id;
2036 t->buffer->transaction = t;
2037 t->buffer->target_node = target_node;
2038 trace_binder_transaction_alloc_buf(t->buffer);
2040 binder_inc_node(target_node, 1, 0, NULL);
2042 off_start = (binder_size_t *)(t->buffer->data +
2043 ALIGN(tr->data_size, sizeof(void *)));
2046 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2047 tr->data.ptr.buffer, tr->data_size)) {
2048 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2049 proc->pid, thread->pid);
2050 return_error = BR_FAILED_REPLY;
2051 goto err_copy_data_failed;
2053 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2054 tr->data.ptr.offsets, tr->offsets_size)) {
2055 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2056 proc->pid, thread->pid);
2057 return_error = BR_FAILED_REPLY;
2058 goto err_copy_data_failed;
2060 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2061 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2062 proc->pid, thread->pid, (u64)tr->offsets_size);
2063 return_error = BR_FAILED_REPLY;
2064 goto err_bad_offset;
2066 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2067 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2068 proc->pid, thread->pid,
2069 (u64)extra_buffers_size);
2070 return_error = BR_FAILED_REPLY;
2071 goto err_bad_offset;
2073 off_end = (void *)off_start + tr->offsets_size;
2074 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2075 sg_buf_end = sg_bufp + extra_buffers_size;
2077 for (; offp < off_end; offp++) {
2078 struct binder_object_header *hdr;
2079 size_t object_size = binder_validate_object(t->buffer, *offp);
2081 if (object_size == 0 || *offp < off_min) {
2082 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2083 proc->pid, thread->pid, (u64)*offp,
2085 (u64)t->buffer->data_size);
2086 return_error = BR_FAILED_REPLY;
2087 goto err_bad_offset;
2090 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2091 off_min = *offp + object_size;
2092 switch (hdr->type) {
2093 case BINDER_TYPE_BINDER:
2094 case BINDER_TYPE_WEAK_BINDER: {
2095 struct flat_binder_object *fp;
2097 fp = to_flat_binder_object(hdr);
2098 ret = binder_translate_binder(fp, t, thread);
2100 return_error = BR_FAILED_REPLY;
2101 goto err_translate_failed;
2104 case BINDER_TYPE_HANDLE:
2105 case BINDER_TYPE_WEAK_HANDLE: {
2106 struct flat_binder_object *fp;
2108 fp = to_flat_binder_object(hdr);
2109 ret = binder_translate_handle(fp, t, thread);
2111 return_error = BR_FAILED_REPLY;
2112 goto err_translate_failed;
2116 case BINDER_TYPE_FD: {
2117 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2118 int target_fd = binder_translate_fd(fp->fd, t, thread,
2121 if (target_fd < 0) {
2122 return_error = BR_FAILED_REPLY;
2123 goto err_translate_failed;
2128 case BINDER_TYPE_FDA: {
2129 struct binder_fd_array_object *fda =
2130 to_binder_fd_array_object(hdr);
2131 struct binder_buffer_object *parent =
2132 binder_validate_ptr(t->buffer, fda->parent,
2136 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2137 proc->pid, thread->pid);
2138 return_error = BR_FAILED_REPLY;
2139 goto err_bad_parent;
2141 if (!binder_validate_fixup(t->buffer, off_start,
2142 parent, fda->parent_offset,
2144 last_fixup_min_off)) {
2145 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2146 proc->pid, thread->pid);
2147 return_error = BR_FAILED_REPLY;
2148 goto err_bad_parent;
2150 ret = binder_translate_fd_array(fda, parent, t, thread,
2153 return_error = BR_FAILED_REPLY;
2154 goto err_translate_failed;
2156 last_fixup_obj = parent;
2157 last_fixup_min_off =
2158 fda->parent_offset + sizeof(u32) * fda->num_fds;
2160 case BINDER_TYPE_PTR: {
2161 struct binder_buffer_object *bp =
2162 to_binder_buffer_object(hdr);
2163 size_t buf_left = sg_buf_end - sg_bufp;
2165 if (bp->length > buf_left) {
2166 binder_user_error("%d:%d got transaction with too large buffer\n",
2167 proc->pid, thread->pid);
2168 return_error = BR_FAILED_REPLY;
2169 goto err_bad_offset;
2171 if (copy_from_user(sg_bufp,
2172 (const void __user *)(uintptr_t)
2173 bp->buffer, bp->length)) {
2174 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2175 proc->pid, thread->pid);
2176 return_error = BR_FAILED_REPLY;
2177 goto err_copy_data_failed;
2179 /* Fixup buffer pointer to target proc address space */
2180 bp->buffer = (uintptr_t)sg_bufp +
2181 target_proc->user_buffer_offset;
2182 sg_bufp += ALIGN(bp->length, sizeof(u64));
2184 ret = binder_fixup_parent(t, thread, bp, off_start,
2187 last_fixup_min_off);
2189 return_error = BR_FAILED_REPLY;
2190 goto err_translate_failed;
2192 last_fixup_obj = bp;
2193 last_fixup_min_off = 0;
2196 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2197 proc->pid, thread->pid, hdr->type);
2198 return_error = BR_FAILED_REPLY;
2199 goto err_bad_object_type;
2200 }
2201 }
2202 if (reply) {
2203 BUG_ON(t->buffer->async_transaction != 0);
2204 binder_pop_transaction(target_thread, in_reply_to);
2205 } else if (!(t->flags & TF_ONE_WAY)) {
2206 BUG_ON(t->buffer->async_transaction != 0);
2207 t->need_reply = 1;
2208 t->from_parent = thread->transaction_stack;
2209 thread->transaction_stack = t;
2210 } else {
2211 BUG_ON(target_node == NULL);
2212 BUG_ON(t->buffer->async_transaction != 1);
2213 if (target_node->has_async_transaction) {
2214 target_list = &target_node->async_todo;
2215 target_wait = NULL;
2216 } else
2217 target_node->has_async_transaction = 1;
2218 }
2219 t->work.type = BINDER_WORK_TRANSACTION;
2220 list_add_tail(&t->work.entry, target_list);
2221 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2222 list_add_tail(&tcomplete->entry, &thread->todo);
2223 if (target_wait)
2224 wake_up_interruptible(target_wait);
2225 return;
2227 err_translate_failed:
2228 err_bad_object_type:
2229 err_bad_offset:
2230 err_bad_parent:
2231 err_copy_data_failed:
2232 trace_binder_transaction_failed_buffer_release(t->buffer);
2233 binder_transaction_buffer_release(target_proc, t->buffer, offp);
2234 t->buffer->transaction = NULL;
2235 binder_free_buf(target_proc, t->buffer);
2236 err_binder_alloc_buf_failed:
2237 kfree(tcomplete);
2238 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2239 err_alloc_tcomplete_failed:
2240 kfree(t);
2241 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2242 err_alloc_t_failed:
2243 err_bad_call_stack:
2244 err_empty_call_stack:
2245 err_dead_binder:
2246 err_invalid_target_handle:
2247 err_no_context_mgr_node:
2248 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2249 "%d:%d transaction failed %d, size %lld-%lld\n",
2250 proc->pid, thread->pid, return_error,
2251 (u64)tr->data_size, (u64)tr->offsets_size);
2254 struct binder_transaction_log_entry *fe;
2256 fe = binder_transaction_log_add(
2257 &context->transaction_log_failed);
2261 BUG_ON(thread->return_error != BR_OK);
2262 if (in_reply_to) {
2263 thread->return_error = BR_TRANSACTION_COMPLETE;
2264 binder_send_failed_reply(in_reply_to, return_error);
2265 } else
2266 thread->return_error = return_error;
2267 }
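/*
 * binder_thread_write() consumes a stream of BC_* commands from the
 * userspace write buffer, one command per loop iteration. *consumed is
 * advanced as commands complete, so userspace can resume after a
 * partial failure without replaying work that already succeeded.
 */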
2269 static int binder_thread_write(struct binder_proc *proc,
2270 struct binder_thread *thread,
2271 binder_uintptr_t binder_buffer, size_t size,
2272 binder_size_t *consumed)
2273 {
2274 uint32_t cmd;
2275 struct binder_context *context = proc->context;
2276 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2277 void __user *ptr = buffer + *consumed;
2278 void __user *end = buffer + size;
2280 while (ptr < end && thread->return_error == BR_OK) {
2281 if (get_user(cmd, (uint32_t __user *)ptr))
2282 return -EFAULT;
2283 ptr += sizeof(uint32_t);
2284 trace_binder_command(cmd);
2285 if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) {
2286 context->binder_stats.bc[_IOC_NR(cmd)]++;
2287 proc->stats.bc[_IOC_NR(cmd)]++;
2288 thread->stats.bc[_IOC_NR(cmd)]++;
2289 }
2290 switch (cmd) {
2291 case BC_INCREFS:
2292 case BC_ACQUIRE:
2293 case BC_RELEASE:
2294 case BC_DECREFS: {
2295 uint32_t target;
2296 struct binder_ref *ref;
2297 const char *debug_string;
2299 if (get_user(target, (uint32_t __user *)ptr))
2300 return -EFAULT;
2301 ptr += sizeof(uint32_t);
2302 if (target == 0 && context->binder_context_mgr_node &&
2303 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
2304 ref = binder_get_ref_for_node(proc,
2305 context->binder_context_mgr_node);
2306 if (ref->desc != target) {
2307 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
2308 proc->pid, thread->pid,
2309 ref->desc);
2310 }
2311 } else
2312 ref = binder_get_ref(proc, target,
2313 cmd == BC_ACQUIRE ||
2314 cmd == BC_RELEASE);
2315 if (ref == NULL) {
2316 binder_user_error("%d:%d refcount change on invalid ref %d\n",
2317 proc->pid, thread->pid, target);
2318 break;
2319 }
2322 debug_string = "IncRefs";
2323 binder_inc_ref(ref, 0, NULL);
2326 debug_string = "Acquire";
2327 binder_inc_ref(ref, 1, NULL);
2330 debug_string = "Release";
2331 binder_dec_ref(ref, 1);
2335 debug_string = "DecRefs";
2336 binder_dec_ref(ref, 0);
2339 binder_debug(BINDER_DEBUG_USER_REFS,
2340 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
2341 proc->pid, thread->pid, debug_string, ref->debug_id,
2342 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
2343 break;
2344 }
2345 case BC_INCREFS_DONE:
2346 case BC_ACQUIRE_DONE: {
2347 binder_uintptr_t node_ptr;
2348 binder_uintptr_t cookie;
2349 struct binder_node *node;
2351 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
2352 return -EFAULT;
2353 ptr += sizeof(binder_uintptr_t);
2354 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2355 return -EFAULT;
2356 ptr += sizeof(binder_uintptr_t);
2357 node = binder_get_node(proc, node_ptr);
2358 if (node == NULL) {
2359 binder_user_error("%d:%d %s u%016llx no match\n",
2360 proc->pid, thread->pid,
2361 cmd == BC_INCREFS_DONE ?
2362 "BC_INCREFS_DONE" :
2363 "BC_ACQUIRE_DONE",
2364 (u64)node_ptr);
2365 break;
2366 }
2367 if (cookie != node->cookie) {
2368 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2369 proc->pid, thread->pid,
2370 cmd == BC_INCREFS_DONE ?
2371 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2372 (u64)node_ptr, node->debug_id,
2373 (u64)cookie, (u64)node->cookie);
2374 break;
2375 }
2376 if (cmd == BC_ACQUIRE_DONE) {
2377 if (node->pending_strong_ref == 0) {
2378 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2379 proc->pid, thread->pid,
2380 node->debug_id);
2381 break;
2382 }
2383 node->pending_strong_ref = 0;
2384 } else {
2385 if (node->pending_weak_ref == 0) {
2386 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2387 proc->pid, thread->pid,
2388 node->debug_id);
2389 break;
2390 }
2391 node->pending_weak_ref = 0;
2392 }
2393 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2394 binder_debug(BINDER_DEBUG_USER_REFS,
2395 "%d:%d %s node %d ls %d lw %d\n",
2396 proc->pid, thread->pid,
2397 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2398 node->debug_id, node->local_strong_refs, node->local_weak_refs);
2399 break;
2400 }
2401 case BC_ATTEMPT_ACQUIRE:
2402 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2403 break;
2404 case BC_ACQUIRE_RESULT:
2405 pr_err("BC_ACQUIRE_RESULT not supported\n");
2406 break;
2408 case BC_FREE_BUFFER: {
2409 binder_uintptr_t data_ptr;
2410 struct binder_buffer *buffer;
2412 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2413 return -EFAULT;
2414 ptr += sizeof(binder_uintptr_t);
2416 buffer = binder_buffer_lookup(proc, data_ptr);
2417 if (buffer == NULL) {
2418 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2419 proc->pid, thread->pid, (u64)data_ptr);
2420 break;
2421 }
2422 if (!buffer->allow_user_free) {
2423 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2424 proc->pid, thread->pid, (u64)data_ptr);
2425 break;
2426 }
2427 binder_debug(BINDER_DEBUG_FREE_BUFFER,
2428 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2429 proc->pid, thread->pid, (u64)data_ptr,
2430 buffer->debug_id,
2431 buffer->transaction ? "active" : "finished");
2433 if (buffer->transaction) {
2434 buffer->transaction->buffer = NULL;
2435 buffer->transaction = NULL;
2436 }
2437 if (buffer->async_transaction && buffer->target_node) {
2438 BUG_ON(!buffer->target_node->has_async_transaction);
2439 if (list_empty(&buffer->target_node->async_todo))
2440 buffer->target_node->has_async_transaction = 0;
2441 else
2442 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2443 }
2444 trace_binder_transaction_buffer_release(buffer);
2445 binder_transaction_buffer_release(proc, buffer, NULL);
2446 binder_free_buf(proc, buffer);
2447 break;
2448 }
2450 case BC_TRANSACTION_SG:
2451 case BC_REPLY_SG: {
2452 struct binder_transaction_data_sg tr;
2454 if (copy_from_user(&tr, ptr, sizeof(tr)))
2455 return -EFAULT;
2456 ptr += sizeof(tr);
2457 binder_transaction(proc, thread, &tr.transaction_data,
2458 cmd == BC_REPLY_SG, tr.buffers_size);
2459 break;
2460 }
2461 case BC_TRANSACTION:
2462 case BC_REPLY: {
2463 struct binder_transaction_data tr;
2465 if (copy_from_user(&tr, ptr, sizeof(tr)))
2466 return -EFAULT;
2467 ptr += sizeof(tr);
2468 binder_transaction(proc, thread, &tr,
2469 cmd == BC_REPLY, 0);
2470 break;
2471 }
2473 case BC_REGISTER_LOOPER:
2474 binder_debug(BINDER_DEBUG_THREADS,
2475 "%d:%d BC_REGISTER_LOOPER\n",
2476 proc->pid, thread->pid);
2477 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2478 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2479 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2480 proc->pid, thread->pid);
2481 } else if (proc->requested_threads == 0) {
2482 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2483 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2484 proc->pid, thread->pid);
2485 } else {
2486 proc->requested_threads--;
2487 proc->requested_threads_started++;
2488 }
2489 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2490 break;
2491 case BC_ENTER_LOOPER:
2492 binder_debug(BINDER_DEBUG_THREADS,
2493 "%d:%d BC_ENTER_LOOPER\n",
2494 proc->pid, thread->pid);
2495 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2496 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2497 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2498 proc->pid, thread->pid);
2499 }
2500 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2501 break;
2502 case BC_EXIT_LOOPER:
2503 binder_debug(BINDER_DEBUG_THREADS,
2504 "%d:%d BC_EXIT_LOOPER\n",
2505 proc->pid, thread->pid);
2506 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2507 break;
2509 case BC_REQUEST_DEATH_NOTIFICATION:
2510 case BC_CLEAR_DEATH_NOTIFICATION: {
2511 uint32_t target;
2512 binder_uintptr_t cookie;
2513 struct binder_ref *ref;
2514 struct binder_ref_death *death;
2516 if (get_user(target, (uint32_t __user *)ptr))
2517 return -EFAULT;
2518 ptr += sizeof(uint32_t);
2519 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2520 return -EFAULT;
2521 ptr += sizeof(binder_uintptr_t);
2522 ref = binder_get_ref(proc, target, false);
2523 if (ref == NULL) {
2524 binder_user_error("%d:%d %s invalid ref %d\n",
2525 proc->pid, thread->pid,
2526 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2527 "BC_REQUEST_DEATH_NOTIFICATION" :
2528 "BC_CLEAR_DEATH_NOTIFICATION",
2529 target);
2530 break;
2531 }
2533 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2534 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2535 proc->pid, thread->pid,
2536 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2537 "BC_REQUEST_DEATH_NOTIFICATION" :
2538 "BC_CLEAR_DEATH_NOTIFICATION",
2539 (u64)cookie, ref->debug_id, ref->desc,
2540 ref->strong, ref->weak, ref->node->debug_id);
2542 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2543 if (ref->death) {
2544 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2545 proc->pid, thread->pid);
2546 break;
2547 }
2548 death = kzalloc(sizeof(*death), GFP_KERNEL);
2549 if (death == NULL) {
2550 thread->return_error = BR_ERROR;
2551 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2552 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2553 proc->pid, thread->pid);
2554 break;
2555 }
2556 binder_stats_created(BINDER_STAT_DEATH);
2557 INIT_LIST_HEAD(&death->work.entry);
2558 death->cookie = cookie;
2559 ref->death = death;
2560 if (ref->node->proc == NULL) {
2561 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2562 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2563 list_add_tail(&ref->death->work.entry, &thread->todo);
2564 } else {
2565 list_add_tail(&ref->death->work.entry, &proc->todo);
2566 wake_up_interruptible(&proc->wait);
2567 }
2568 }
2569 } else {
2570 if (ref->death == NULL) {
2571 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2572 proc->pid, thread->pid);
2573 break;
2574 }
2575 death = ref->death;
2576 if (death->cookie != cookie) {
2577 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2578 proc->pid, thread->pid,
2579 (u64)death->cookie,
2580 (u64)cookie);
2581 break;
2582 }
2583 ref->death = NULL;
2584 if (list_empty(&death->work.entry)) {
2585 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2586 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2587 list_add_tail(&death->work.entry, &thread->todo);
2588 } else {
2589 list_add_tail(&death->work.entry, &proc->todo);
2590 wake_up_interruptible(&proc->wait);
2591 }
2592 } else {
2593 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2594 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2595 }
2596 }
2597 } break;
2598 case BC_DEAD_BINDER_DONE: {
2599 struct binder_work *w;
2600 binder_uintptr_t cookie;
2601 struct binder_ref_death *death = NULL;
2603 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2604 return -EFAULT;
2606 ptr += sizeof(cookie);
2607 list_for_each_entry(w, &proc->delivered_death, entry) {
2608 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2610 if (tmp_death->cookie == cookie) {
2611 death = tmp_death;
2612 break;
2613 }
2614 }
2615 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2616 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2617 proc->pid, thread->pid, (u64)cookie,
2618 death);
2619 if (death == NULL) {
2620 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2621 proc->pid, thread->pid, (u64)cookie);
2622 break;
2623 }
2625 list_del_init(&death->work.entry);
2626 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2627 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2628 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2629 list_add_tail(&death->work.entry, &thread->todo);
2630 } else {
2631 list_add_tail(&death->work.entry, &proc->todo);
2632 wake_up_interruptible(&proc->wait);
2633 }
2634 }
2635 } break;
2637 default:
2638 pr_err("%d:%d unknown command %d\n",
2639 proc->pid, thread->pid, cmd);
2640 return -EINVAL;
2641 }
2642 *consumed = ptr - buffer;
2643 }
2644 return 0;
2645 }
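/*
 * binder_stat_br() mirrors the BC_ accounting done in
 * binder_thread_write(): every BR_* code returned to userspace is
 * counted per context, per process and per thread, indexed by
 * _IOC_NR().
 */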
2647 static void binder_stat_br(struct binder_proc *proc,
2648 struct binder_thread *thread, uint32_t cmd)
2649 {
2650 trace_binder_return(cmd);
2651 if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) {
2652 proc->context->binder_stats.br[_IOC_NR(cmd)]++;
2653 proc->stats.br[_IOC_NR(cmd)]++;
2654 thread->stats.br[_IOC_NR(cmd)]++;
2655 }
2656 }
2658 static int binder_has_proc_work(struct binder_proc *proc,
2659 struct binder_thread *thread)
2660 {
2661 return !list_empty(&proc->todo) ||
2662 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2663 }
2665 static int binder_has_thread_work(struct binder_thread *thread)
2666 {
2667 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2668 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2669 }
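/*
 * binder_thread_read() fills the read buffer with BR_* work items. A
 * buffer consumed from offset 0 always starts with BR_NOOP so the
 * stream can be parsed uniformly, and the thread only waits on the
 * process-wide queue when its own todo list and transaction stack are
 * empty.
 */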
2671 static int binder_thread_read(struct binder_proc *proc,
2672 struct binder_thread *thread,
2673 binder_uintptr_t binder_buffer, size_t size,
2674 binder_size_t *consumed, int non_block)
2675 {
2676 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2677 void __user *ptr = buffer + *consumed;
2678 void __user *end = buffer + size;
2680 int ret = 0;
2681 int wait_for_proc_work;
2683 if (*consumed == 0) {
2684 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2685 return -EFAULT;
2686 ptr += sizeof(uint32_t);
2687 }
2689 retry:
2690 wait_for_proc_work = thread->transaction_stack == NULL &&
2691 list_empty(&thread->todo);
2693 if (thread->return_error != BR_OK && ptr < end) {
2694 if (thread->return_error2 != BR_OK) {
2695 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2696 return -EFAULT;
2697 ptr += sizeof(uint32_t);
2698 binder_stat_br(proc, thread, thread->return_error2);
2699 if (ptr == end)
2700 goto done;
2701 thread->return_error2 = BR_OK;
2702 }
2703 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2704 return -EFAULT;
2705 ptr += sizeof(uint32_t);
2706 binder_stat_br(proc, thread, thread->return_error);
2707 thread->return_error = BR_OK;
2708 goto done;
2709 }
2712 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2713 if (wait_for_proc_work)
2714 proc->ready_threads++;
2716 binder_unlock(proc->context, __func__);
2718 trace_binder_wait_for_work(wait_for_proc_work,
2719 !!thread->transaction_stack,
2720 !list_empty(&thread->todo));
2721 if (wait_for_proc_work) {
2722 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2723 BINDER_LOOPER_STATE_ENTERED))) {
2724 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2725 proc->pid, thread->pid, thread->looper);
2726 wait_event_interruptible(binder_user_error_wait,
2727 binder_stop_on_user_error < 2);
2729 binder_set_nice(proc->default_priority);
2730 if (non_block) {
2731 if (!binder_has_proc_work(proc, thread))
2732 ret = -EAGAIN;
2733 } else
2734 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2735 } else {
2736 if (non_block) {
2737 if (!binder_has_thread_work(thread))
2738 ret = -EAGAIN;
2739 } else
2740 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2741 }
2743 binder_lock(proc->context, __func__);
2745 if (wait_for_proc_work)
2746 proc->ready_threads--;
2747 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2749 if (ret)
2750 return ret;
2752 while (1) {
2753 uint32_t cmd;
2754 struct binder_transaction_data tr;
2755 struct binder_work *w;
2756 struct binder_transaction *t = NULL;
2758 if (!list_empty(&thread->todo)) {
2759 w = list_first_entry(&thread->todo, struct binder_work,
2760 entry);
2761 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2762 w = list_first_entry(&proc->todo, struct binder_work,
2763 entry);
2764 } else {
2765 /* no data added */
2766 if (ptr - buffer == 4 &&
2767 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2768 goto retry;
2769 break;
2770 }
2772 if (end - ptr < sizeof(tr) + 4)
2773 break;
2775 switch (w->type) {
2776 case BINDER_WORK_TRANSACTION: {
2777 t = container_of(w, struct binder_transaction, work);
2778 } break;
2779 case BINDER_WORK_TRANSACTION_COMPLETE: {
2780 cmd = BR_TRANSACTION_COMPLETE;
2781 if (put_user(cmd, (uint32_t __user *)ptr))
2782 return -EFAULT;
2783 ptr += sizeof(uint32_t);
2785 binder_stat_br(proc, thread, cmd);
2786 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2787 "%d:%d BR_TRANSACTION_COMPLETE\n",
2788 proc->pid, thread->pid);
2790 list_del(&w->entry);
2791 kfree(w);
2792 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2793 } break;
2794 case BINDER_WORK_NODE: {
2795 struct binder_node *node = container_of(w, struct binder_node, work);
2796 uint32_t cmd = BR_NOOP;
2797 const char *cmd_name;
2798 int strong = node->internal_strong_refs || node->local_strong_refs;
2799 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2801 if (weak && !node->has_weak_ref) {
2802 cmd = BR_INCREFS;
2803 cmd_name = "BR_INCREFS";
2804 node->has_weak_ref = 1;
2805 node->pending_weak_ref = 1;
2806 node->local_weak_refs++;
2807 } else if (strong && !node->has_strong_ref) {
2808 cmd = BR_ACQUIRE;
2809 cmd_name = "BR_ACQUIRE";
2810 node->has_strong_ref = 1;
2811 node->pending_strong_ref = 1;
2812 node->local_strong_refs++;
2813 } else if (!strong && node->has_strong_ref) {
2814 cmd = BR_RELEASE;
2815 cmd_name = "BR_RELEASE";
2816 node->has_strong_ref = 0;
2817 } else if (!weak && node->has_weak_ref) {
2818 cmd = BR_DECREFS;
2819 cmd_name = "BR_DECREFS";
2820 node->has_weak_ref = 0;
2821 }
2822 if (cmd != BR_NOOP) {
2823 if (put_user(cmd, (uint32_t __user *)ptr))
2824 return -EFAULT;
2825 ptr += sizeof(uint32_t);
2826 if (put_user(node->ptr,
2827 (binder_uintptr_t __user *)ptr))
2828 return -EFAULT;
2829 ptr += sizeof(binder_uintptr_t);
2830 if (put_user(node->cookie,
2831 (binder_uintptr_t __user *)ptr))
2832 return -EFAULT;
2833 ptr += sizeof(binder_uintptr_t);
2835 binder_stat_br(proc, thread, cmd);
2836 binder_debug(BINDER_DEBUG_USER_REFS,
2837 "%d:%d %s %d u%016llx c%016llx\n",
2838 proc->pid, thread->pid, cmd_name,
2839 node->debug_id,
2840 (u64)node->ptr, (u64)node->cookie);
2841 } else {
2842 list_del_init(&w->entry);
2843 if (!weak && !strong) {
2844 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2845 "%d:%d node %d u%016llx c%016llx deleted\n",
2846 proc->pid, thread->pid,
2847 node->debug_id,
2848 (u64)node->ptr, (u64)node->cookie);
2850 rb_erase(&node->rb_node, &proc->nodes);
2851 kfree(node);
2852 binder_stats_deleted(BINDER_STAT_NODE);
2853 } else {
2854 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2855 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2856 proc->pid, thread->pid,
2857 node->debug_id,
2858 (u64)node->ptr, (u64)node->cookie);
2859 }
2860 }
2861 } break;
2863 case BINDER_WORK_DEAD_BINDER:
2864 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2865 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2866 struct binder_ref_death *death;
2869 death = container_of(w, struct binder_ref_death, work);
2870 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2871 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2873 cmd = BR_DEAD_BINDER;
2874 if (put_user(cmd, (uint32_t __user *)ptr))
2875 return -EFAULT;
2876 ptr += sizeof(uint32_t);
2877 if (put_user(death->cookie,
2878 (binder_uintptr_t __user *)ptr))
2879 return -EFAULT;
2880 ptr += sizeof(binder_uintptr_t);
2881 binder_stat_br(proc, thread, cmd);
2882 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2883 "%d:%d %s %016llx\n",
2884 proc->pid, thread->pid,
2885 cmd == BR_DEAD_BINDER ?
2886 "BR_DEAD_BINDER" :
2887 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2888 (u64)death->cookie);
2890 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2891 list_del(&w->entry);
2892 kfree(death);
2893 binder_stats_deleted(BINDER_STAT_DEATH);
2894 } else
2895 list_move(&w->entry, &proc->delivered_death);
2896 if (cmd == BR_DEAD_BINDER)
2897 goto done; /* DEAD_BINDER notifications can cause transactions */
2898 } break;
2899 }
2901 if (!t)
2902 continue;
2904 BUG_ON(t->buffer == NULL);
2905 if (t->buffer->target_node) {
2906 struct binder_node *target_node = t->buffer->target_node;
2908 tr.target.ptr = target_node->ptr;
2909 tr.cookie = target_node->cookie;
2910 t->saved_priority = task_nice(current);
2911 if (t->priority < target_node->min_priority &&
2912 !(t->flags & TF_ONE_WAY))
2913 binder_set_nice(t->priority);
2914 else if (!(t->flags & TF_ONE_WAY) ||
2915 t->saved_priority > target_node->min_priority)
2916 binder_set_nice(target_node->min_priority);
2917 cmd = BR_TRANSACTION;
2918 } else {
2919 tr.target.ptr = 0;
2920 tr.cookie = 0;
2921 cmd = BR_REPLY;
2922 }
2923 tr.code = t->code;
2924 tr.flags = t->flags;
2925 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2927 if (t->from) {
2928 struct task_struct *sender = t->from->proc->tsk;
2930 tr.sender_pid = task_tgid_nr_ns(sender,
2931 task_active_pid_ns(current));
2932 } else {
2933 tr.sender_pid = 0;
2934 }
2936 tr.data_size = t->buffer->data_size;
2937 tr.offsets_size = t->buffer->offsets_size;
2938 tr.data.ptr.buffer = (binder_uintptr_t)(
2939 (uintptr_t)t->buffer->data +
2940 proc->user_buffer_offset);
2941 tr.data.ptr.offsets = tr.data.ptr.buffer +
2942 ALIGN(t->buffer->data_size,
2943 sizeof(void *));
2945 if (put_user(cmd, (uint32_t __user *)ptr))
2946 return -EFAULT;
2947 ptr += sizeof(uint32_t);
2948 if (copy_to_user(ptr, &tr, sizeof(tr)))
2949 return -EFAULT;
2950 ptr += sizeof(tr);
2952 trace_binder_transaction_received(t);
2953 binder_stat_br(proc, thread, cmd);
2954 binder_debug(BINDER_DEBUG_TRANSACTION,
2955 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2956 proc->pid, thread->pid,
2957 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2958 "BR_REPLY",
2959 t->debug_id, t->from ? t->from->proc->pid : 0,
2960 t->from ? t->from->pid : 0, cmd,
2961 t->buffer->data_size, t->buffer->offsets_size,
2962 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2964 list_del(&t->work.entry);
2965 t->buffer->allow_user_free = 1;
2966 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2967 t->to_parent = thread->transaction_stack;
2968 t->to_thread = thread;
2969 thread->transaction_stack = t;
2970 } else {
2971 t->buffer->transaction = NULL;
2972 kfree(t);
2973 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2974 }
2975 break;
2976 }
2978 done:
2980 *consumed = ptr - buffer;
2981 if (proc->requested_threads + proc->ready_threads == 0 &&
2982 proc->requested_threads_started < proc->max_threads &&
2983 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2984 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
2985 /* spawn a new thread if we leave this out */) {
2986 proc->requested_threads++;
2987 binder_debug(BINDER_DEBUG_THREADS,
2988 "%d:%d BR_SPAWN_LOOPER\n",
2989 proc->pid, thread->pid);
2990 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2991 return -EFAULT;
2992 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2993 }
2994 return 0;
2995 }
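/*
 * Note that BR_SPAWN_LOOPER above is written over the BR_NOOP slot at
 * the very start of the read buffer (put_user() on 'buffer', not
 * 'ptr'), so userspace sees the request to spawn a looper thread
 * before any of the queued work.
 */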
2997 static void binder_release_work(struct list_head *list)
2998 {
2999 struct binder_work *w;
3001 while (!list_empty(list)) {
3002 w = list_first_entry(list, struct binder_work, entry);
3003 list_del_init(&w->entry);
3004 switch (w->type) {
3005 case BINDER_WORK_TRANSACTION: {
3006 struct binder_transaction *t;
3008 t = container_of(w, struct binder_transaction, work);
3009 if (t->buffer->target_node &&
3010 !(t->flags & TF_ONE_WAY)) {
3011 binder_send_failed_reply(t, BR_DEAD_REPLY);
3012 } else {
3013 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3014 "undelivered transaction %d\n",
3015 t->debug_id);
3016 t->buffer->transaction = NULL;
3017 kfree(t);
3018 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3019 }
3020 } break;
3021 case BINDER_WORK_TRANSACTION_COMPLETE: {
3022 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3023 "undelivered TRANSACTION_COMPLETE\n");
3025 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3027 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3028 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3029 struct binder_ref_death *death;
3031 death = container_of(w, struct binder_ref_death, work);
3032 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3033 "undelivered death notification, %016llx\n",
3034 (u64)death->cookie);
3035 kfree(death);
3036 binder_stats_deleted(BINDER_STAT_DEATH);
3037 } break;
3038 default:
3039 pr_err("unexpected work type, %d, not freed\n",
3040 w->type);
3041 break;
3042 }
3043 }
3044 }
3047 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3048 {
3049 struct binder_thread *thread = NULL;
3050 struct rb_node *parent = NULL;
3051 struct rb_node **p = &proc->threads.rb_node;
3053 while (*p) {
3054 parent = *p;
3055 thread = rb_entry(parent, struct binder_thread, rb_node);
3057 if (current->pid < thread->pid)
3058 p = &(*p)->rb_left;
3059 else if (current->pid > thread->pid)
3060 p = &(*p)->rb_right;
3061 else
3062 break;
3063 }
3064 if (*p == NULL) {
3065 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3066 if (thread == NULL)
3067 return NULL;
3068 binder_stats_created(BINDER_STAT_THREAD);
3069 thread->proc = proc;
3070 thread->pid = current->pid;
3071 init_waitqueue_head(&thread->wait);
3072 INIT_LIST_HEAD(&thread->todo);
3073 rb_link_node(&thread->rb_node, parent, p);
3074 rb_insert_color(&thread->rb_node, &proc->threads);
3075 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3076 thread->return_error = BR_OK;
3077 thread->return_error2 = BR_OK;
3078 }
3079 return thread;
3080 }
3082 static int binder_free_thread(struct binder_proc *proc,
3083 struct binder_thread *thread)
3084 {
3085 struct binder_transaction *t;
3086 struct binder_transaction *send_reply = NULL;
3087 int active_transactions = 0;
3089 rb_erase(&thread->rb_node, &proc->threads);
3090 t = thread->transaction_stack;
3091 if (t && t->to_thread == thread)
3092 send_reply = t;
3093 while (t) {
3094 active_transactions++;
3095 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3096 "release %d:%d transaction %d %s, still active\n",
3097 proc->pid, thread->pid,
3098 t->debug_id,
3099 (t->to_thread == thread) ? "in" : "out");
3101 if (t->to_thread == thread) {
3102 t->to_proc = NULL;
3103 t->to_thread = NULL;
3104 if (t->buffer) {
3105 t->buffer->transaction = NULL;
3106 t->buffer = NULL;
3107 }
3108 t = t->to_parent;
3109 } else if (t->from == thread) {
3110 t->from = NULL;
3111 t = t->from_parent;
3112 } else
3113 BUG();
3114 }
3115 if (send_reply)
3116 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3117 binder_release_work(&thread->todo);
3118 kfree(thread);
3119 binder_stats_deleted(BINDER_STAT_THREAD);
3120 return active_transactions;
3121 }
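/*
 * binder_free_thread() severs any transactions that still reference
 * the exiting thread: incoming legs are unhooked from their buffers
 * and an outstanding reply owed to a remote caller is turned into
 * BR_DEAD_REPLY.
 */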
3123 static unsigned int binder_poll(struct file *filp,
3124 struct poll_table_struct *wait)
3125 {
3126 struct binder_proc *proc = filp->private_data;
3127 struct binder_thread *thread = NULL;
3128 int wait_for_proc_work;
3130 binder_lock(proc->context, __func__);
3132 thread = binder_get_thread(proc);
3134 wait_for_proc_work = thread->transaction_stack == NULL &&
3135 list_empty(&thread->todo) && thread->return_error == BR_OK;
3137 binder_unlock(proc->context, __func__);
3139 if (wait_for_proc_work) {
3140 if (binder_has_proc_work(proc, thread))
3141 return POLLIN;
3142 poll_wait(filp, &proc->wait, wait);
3143 if (binder_has_proc_work(proc, thread))
3144 return POLLIN;
3145 } else {
3146 if (binder_has_thread_work(thread))
3147 return POLLIN;
3148 poll_wait(filp, &thread->wait, wait);
3149 if (binder_has_thread_work(thread))
3150 return POLLIN;
3151 }
3152 return 0;
3153 }
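/*
 * BINDER_WRITE_READ is the workhorse ioctl: one call submits a buffer
 * of BC_* commands and drains BR_* returns. An illustrative
 * userspace-side sketch (not part of this driver):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size = sizeof(cmds),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed report progress
 */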
3155 static int binder_ioctl_write_read(struct file *filp,
3156 unsigned int cmd, unsigned long arg,
3157 struct binder_thread *thread)
3158 {
3159 int ret = 0;
3160 struct binder_proc *proc = filp->private_data;
3161 unsigned int size = _IOC_SIZE(cmd);
3162 void __user *ubuf = (void __user *)arg;
3163 struct binder_write_read bwr;
3165 if (size != sizeof(struct binder_write_read)) {
3166 ret = -EINVAL;
3167 goto out;
3168 }
3169 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3170 ret = -EFAULT;
3171 goto out;
3172 }
3173 binder_debug(BINDER_DEBUG_READ_WRITE,
3174 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3175 proc->pid, thread->pid,
3176 (u64)bwr.write_size, (u64)bwr.write_buffer,
3177 (u64)bwr.read_size, (u64)bwr.read_buffer);
3179 if (bwr.write_size > 0) {
3180 ret = binder_thread_write(proc, thread,
3181 bwr.write_buffer,
3182 bwr.write_size,
3183 &bwr.write_consumed);
3184 trace_binder_write_done(ret);
3185 if (ret < 0) {
3186 bwr.read_consumed = 0;
3187 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3188 ret = -EFAULT;
3189 goto out;
3190 }
3191 }
3192 if (bwr.read_size > 0) {
3193 ret = binder_thread_read(proc, thread, bwr.read_buffer,
3194 bwr.read_size,
3195 &bwr.read_consumed,
3196 filp->f_flags & O_NONBLOCK);
3197 trace_binder_read_done(ret);
3198 if (!list_empty(&proc->todo))
3199 wake_up_interruptible(&proc->wait);
3200 if (ret < 0) {
3201 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3202 ret = -EFAULT;
3203 goto out;
3204 }
3205 }
3206 binder_debug(BINDER_DEBUG_READ_WRITE,
3207 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3208 proc->pid, thread->pid,
3209 (u64)bwr.write_consumed, (u64)bwr.write_size,
3210 (u64)bwr.read_consumed, (u64)bwr.read_size);
3211 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3212 ret = -EFAULT;
3213 goto out;
3214 }
3216 out:
3217 return ret;
3218 }
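/*
 * Each binder_context has at most one context manager (the node behind
 * handle 0, traditionally servicemanager). The first successful
 * BINDER_SET_CONTEXT_MGR on a device claims it, subject to the
 * security hook and, if one was configured, a uid check.
 */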
3219 static int binder_ioctl_set_ctx_mgr(struct file *filp)
3220 {
3221 int ret = 0;
3222 struct binder_proc *proc = filp->private_data;
3223 struct binder_context *context = proc->context;
3225 kuid_t curr_euid = current_euid();
3227 if (context->binder_context_mgr_node) {
3228 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3232 ret = security_binder_set_context_mgr(proc->tsk);
3235 if (uid_valid(context->binder_context_mgr_uid)) {
3236 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
3237 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3238 from_kuid(&init_user_ns, curr_euid),
3239 from_kuid(&init_user_ns,
3240 context->binder_context_mgr_uid));
3241 ret = -EPERM;
3242 goto out;
3243 }
3244 } else {
3245 context->binder_context_mgr_uid = curr_euid;
3246 }
3247 context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
3248 if (!context->binder_context_mgr_node) {
3249 ret = -ENOMEM;
3250 goto out;
3251 }
3252 context->binder_context_mgr_node->local_weak_refs++;
3253 context->binder_context_mgr_node->local_strong_refs++;
3254 context->binder_context_mgr_node->has_strong_ref = 1;
3255 context->binder_context_mgr_node->has_weak_ref = 1;
3257 out:
3258 return ret;
3259 }
3260 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3261 {
3262 int ret;
3263 struct binder_proc *proc = filp->private_data;
3264 struct binder_context *context = proc->context;
3265 struct binder_thread *thread;
3266 unsigned int size = _IOC_SIZE(cmd);
3267 void __user *ubuf = (void __user *)arg;
3269 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
3270 proc->pid, current->pid, cmd, arg);*/
3272 trace_binder_ioctl(cmd, arg);
3274 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3275 if (ret)
3276 goto err_unlocked;
3278 binder_lock(context, __func__);
3279 thread = binder_get_thread(proc);
3280 if (thread == NULL) {
3281 ret = -ENOMEM;
3282 goto err;
3283 }
3285 switch (cmd) {
3286 case BINDER_WRITE_READ:
3287 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3288 if (ret)
3289 goto err;
3290 break;
3291 case BINDER_SET_MAX_THREADS:
3292 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3293 ret = -EINVAL;
3294 goto err;
3295 }
3296 break;
3297 case BINDER_SET_CONTEXT_MGR:
3298 ret = binder_ioctl_set_ctx_mgr(filp);
3299 if (ret)
3300 goto err;
3301 break;
3302 case BINDER_THREAD_EXIT:
3303 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
3304 proc->pid, thread->pid);
3305 binder_free_thread(proc, thread);
3306 thread = NULL;
3307 break;
3308 case BINDER_VERSION: {
3309 struct binder_version __user *ver = ubuf;
3311 if (size != sizeof(struct binder_version)) {
3312 ret = -EINVAL;
3313 goto err;
3314 }
3315 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3316 &ver->protocol_version)) {
3317 ret = -EINVAL;
3318 goto err;
3319 }
3320 break;
3321 }
3322 default:
3323 ret = -EINVAL;
3324 goto err;
3325 }
3326 ret = 0;
3327 err:
3328 if (thread)
3329 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
3330 binder_unlock(context, __func__);
3331 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3332 if (ret && ret != -ERESTARTSYS)
3333 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
3335 trace_binder_ioctl_done(ret);
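/*
 * The mmap region that backs transaction buffers is read-only for
 * userspace (FORBIDDEN_MMAP_FLAGS rejects VM_WRITE) and capped at 4MB;
 * the kernel writes the data through its own vmalloc-space mapping of
 * the same pages.
 */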
3339 static void binder_vma_open(struct vm_area_struct *vma)
3340 {
3341 struct binder_proc *proc = vma->vm_private_data;
3343 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3344 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3345 proc->pid, vma->vm_start, vma->vm_end,
3346 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3347 (unsigned long)pgprot_val(vma->vm_page_prot));
3350 static void binder_vma_close(struct vm_area_struct *vma)
3351 {
3352 struct binder_proc *proc = vma->vm_private_data;
3354 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3355 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3356 proc->pid, vma->vm_start, vma->vm_end,
3357 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3358 (unsigned long)pgprot_val(vma->vm_page_prot));
3359 proc->vma = NULL;
3360 proc->vma_vm_mm = NULL;
3361 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3362 }
3364 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3365 {
3366 return VM_FAULT_SIGBUS;
3367 }
3369 static const struct vm_operations_struct binder_vm_ops = {
3370 .open = binder_vma_open,
3371 .close = binder_vma_close,
3372 .fault = binder_vm_fault,
3373 };
3375 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3376 {
3377 int ret;
3378 struct vm_struct *area;
3379 struct binder_proc *proc = filp->private_data;
3380 const char *failure_string;
3381 struct binder_buffer *buffer;
3383 if (proc->tsk != current->group_leader)
3384 return -EINVAL;
3386 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3387 vma->vm_end = vma->vm_start + SZ_4M;
3389 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3390 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3391 proc->pid, vma->vm_start, vma->vm_end,
3392 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3393 (unsigned long)pgprot_val(vma->vm_page_prot));
3395 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3396 ret = -EPERM;
3397 failure_string = "bad vm_flags";
3398 goto err_bad_arg;
3399 }
3400 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3402 mutex_lock(&proc->context->binder_mmap_lock);
3403 if (proc->buffer) {
3404 ret = -EBUSY;
3405 failure_string = "already mapped";
3406 goto err_already_mapped;
3409 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
3410 if (area == NULL) {
3411 ret = -ENOMEM;
3412 failure_string = "get_vm_area";
3413 goto err_get_vm_area_failed;
3414 }
3415 proc->buffer = area->addr;
3416 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
3417 mutex_unlock(&proc->context->binder_mmap_lock);
3419 #ifdef CONFIG_CPU_CACHE_VIPT
3420 if (cache_is_vipt_aliasing()) {
3421 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
3422 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
3423 vma->vm_start += PAGE_SIZE;
3424 }
3425 }
3426 #endif
3427 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
3428 if (proc->pages == NULL) {
3429 ret = -ENOMEM;
3430 failure_string = "alloc page array";
3431 goto err_alloc_pages_failed;
3432 }
3433 proc->buffer_size = vma->vm_end - vma->vm_start;
3435 vma->vm_ops = &binder_vm_ops;
3436 vma->vm_private_data = proc;
3438 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
3439 ret = -ENOMEM;
3440 failure_string = "alloc small buf";
3441 goto err_alloc_small_buf_failed;
3442 }
3443 buffer = proc->buffer;
3444 INIT_LIST_HEAD(&proc->buffers);
3445 list_add(&buffer->entry, &proc->buffers);
3446 buffer->free = 1;
3447 binder_insert_free_buffer(proc, buffer);
3448 proc->free_async_space = proc->buffer_size / 2;
3449 barrier();
3450 proc->files = get_files_struct(current);
3451 proc->vma = vma;
3452 proc->vma_vm_mm = vma->vm_mm;
3454 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
3455 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
3456 return 0;
3458 err_alloc_small_buf_failed:
3459 kfree(proc->pages);
3460 proc->pages = NULL;
3461 err_alloc_pages_failed:
3462 mutex_lock(&proc->context->binder_mmap_lock);
3463 vfree(proc->buffer);
3464 proc->buffer = NULL;
3465 err_get_vm_area_failed:
3466 err_already_mapped:
3467 mutex_unlock(&proc->context->binder_mmap_lock);
3468 err_bad_arg:
3469 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3470 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3471 return ret;
3472 }
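/*
 * binder_open() ties the binder_proc to the opening thread group, and
 * the device node that was opened selects the binder_context -- and
 * therefore the context manager -- that this process's handles resolve
 * against.
 */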
3474 static int binder_open(struct inode *nodp, struct file *filp)
3475 {
3476 struct binder_proc *proc;
3477 struct binder_device *binder_dev;
3479 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3480 current->group_leader->pid, current->pid);
3482 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3483 if (proc == NULL)
3484 return -ENOMEM;
3485 get_task_struct(current->group_leader);
3486 proc->tsk = current->group_leader;
3487 INIT_LIST_HEAD(&proc->todo);
3488 init_waitqueue_head(&proc->wait);
3489 proc->default_priority = task_nice(current);
3490 binder_dev = container_of(filp->private_data, struct binder_device,
3491 miscdev);
3492 proc->context = &binder_dev->context;
3494 binder_lock(proc->context, __func__);
3496 binder_stats_created(BINDER_STAT_PROC);
3497 hlist_add_head(&proc->proc_node, &proc->context->binder_procs);
3498 proc->pid = current->group_leader->pid;
3499 INIT_LIST_HEAD(&proc->delivered_death);
3500 filp->private_data = proc;
3502 binder_unlock(proc->context, __func__);
3504 if (binder_debugfs_dir_entry_proc) {
3505 char strbuf[11];
3507 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3508 /*
3509 * proc debug entries are shared between contexts, so
3510 * this will fail if the process tries to open the driver
3511 * again with a different context. The printing code will
3512 * anyway print all contexts that a given PID has, so this
3513 * is not a problem.
3514 */
3515 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3516 binder_debugfs_dir_entry_proc,
3517 (void *)(unsigned long)proc->pid,
3518 &binder_proc_fops);
3519 }
3521 return 0;
3522 }
3524 static int binder_flush(struct file *filp, fl_owner_t id)
3525 {
3526 struct binder_proc *proc = filp->private_data;
3528 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3530 return 0;
3531 }
3533 static void binder_deferred_flush(struct binder_proc *proc)
3534 {
3535 struct rb_node *n;
3536 int wake_count = 0;
3538 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3539 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3541 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3542 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3543 wake_up_interruptible(&thread->wait);
3544 wake_count++;
3545 }
3546 }
3547 wake_up_interruptible_all(&proc->wait);
3549 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3550 "binder_flush: %d woke %d threads\n", proc->pid,
3554 static int binder_release(struct inode *nodp, struct file *filp)
3556 struct binder_proc *proc = filp->private_data;
3558 debugfs_remove(proc->debugfs_entry);
3559 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3564 static int binder_node_release(struct binder_node *node, int refs)
3565 {
3566 struct binder_ref *ref;
3567 struct binder_context *context = node->proc->context;
3568 int death = 0;
3570 list_del_init(&node->work.entry);
3571 binder_release_work(&node->async_todo);
3573 if (hlist_empty(&node->refs)) {
3574 kfree(node);
3575 binder_stats_deleted(BINDER_STAT_NODE);
3577 return refs;
3578 }
3580 node->proc = NULL;
3581 node->local_strong_refs = 0;
3582 node->local_weak_refs = 0;
3583 hlist_add_head(&node->dead_node, &context->binder_dead_nodes);
3585 hlist_for_each_entry(ref, &node->refs, node_entry) {
3586 refs++;
3588 if (!ref->death)
3589 continue;
3591 death++;
3593 if (list_empty(&ref->death->work.entry)) {
3594 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3595 list_add_tail(&ref->death->work.entry,
3596 &ref->proc->todo);
3597 wake_up_interruptible(&ref->proc->wait);
3598 } else
3599 BUG();
3600 }
3602 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3603 "node %d now dead, refs %d, death %d\n",
3604 node->debug_id, refs, death);
3606 return refs;
3607 }
3609 static void binder_deferred_release(struct binder_proc *proc)
3610 {
3611 struct binder_transaction *t;
3612 struct binder_context *context = proc->context;
3613 struct rb_node *n;
3614 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3615 active_transactions, page_count;
3618 BUG_ON(proc->files);
3620 hlist_del(&proc->proc_node);
3622 if (context->binder_context_mgr_node &&
3623 context->binder_context_mgr_node->proc == proc) {
3624 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3625 "%s: %d context_mgr_node gone\n",
3626 __func__, proc->pid);
3627 context->binder_context_mgr_node = NULL;
3628 }
3630 threads = 0;
3631 active_transactions = 0;
3632 while ((n = rb_first(&proc->threads))) {
3633 struct binder_thread *thread;
3635 thread = rb_entry(n, struct binder_thread, rb_node);
3636 threads++;
3637 active_transactions += binder_free_thread(proc, thread);
3638 }
3640 nodes = 0;
3641 incoming_refs = 0;
3642 while ((n = rb_first(&proc->nodes))) {
3643 struct binder_node *node;
3645 node = rb_entry(n, struct binder_node, rb_node);
3646 nodes++;
3647 rb_erase(&node->rb_node, &proc->nodes);
3648 incoming_refs = binder_node_release(node,
3649 incoming_refs);
3650 }
3652 outgoing_refs = 0;
3653 while ((n = rb_first(&proc->refs_by_desc))) {
3654 struct binder_ref *ref;
3656 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3657 outgoing_refs++;
3658 binder_delete_ref(ref);
3659 }
3661 binder_release_work(&proc->todo);
3662 binder_release_work(&proc->delivered_death);
3664 buffers = 0;
3665 while ((n = rb_first(&proc->allocated_buffers))) {
3666 struct binder_buffer *buffer;
3668 buffer = rb_entry(n, struct binder_buffer, rb_node);
3670 t = buffer->transaction;
3671 if (t) {
3672 t->buffer = NULL;
3673 buffer->transaction = NULL;
3674 pr_err("release proc %d, transaction %d, not freed\n",
3675 proc->pid, t->debug_id);
3676 /*BUG();*/
3677 }
3679 binder_free_buf(proc, buffer);
3680 buffers++;
3681 }
3683 binder_stats_deleted(BINDER_STAT_PROC);
3685 page_count = 0;
3686 if (proc->pages) {
3687 int i;
3689 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3690 void *page_addr;
3692 if (!proc->pages[i])
3693 continue;
3695 page_addr = proc->buffer + i * PAGE_SIZE;
3696 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3697 "%s: %d: page %d at %p not freed\n",
3698 __func__, proc->pid, i, page_addr);
3699 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3700 __free_page(proc->pages[i]);
3701 page_count++;
3702 }
3703 kfree(proc->pages);
3704 vfree(proc->buffer);
3705 }
3707 put_task_struct(proc->tsk);
3709 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3710 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3711 __func__, proc->pid, threads, nodes, incoming_refs,
3712 outgoing_refs, active_transactions, buffers, page_count);
3714 kfree(proc);
3715 }
3717 static void binder_deferred_func(struct work_struct *work)
3718 {
3719 struct binder_proc *proc;
3720 struct files_struct *files;
3721 struct binder_context *context =
3722 container_of(work, struct binder_context, deferred_work);
3724 int defer;
3726 do {
3727 binder_lock(context, __func__);
3728 mutex_lock(&context->binder_deferred_lock);
3729 if (!hlist_empty(&context->binder_deferred_list)) {
3730 proc = hlist_entry(context->binder_deferred_list.first,
3731 struct binder_proc, deferred_work_node);
3732 hlist_del_init(&proc->deferred_work_node);
3733 defer = proc->deferred_work;
3734 proc->deferred_work = 0;
3735 } else {
3736 proc = NULL;
3737 defer = 0;
3738 }
3739 mutex_unlock(&context->binder_deferred_lock);
3741 files = NULL;
3742 if (defer & BINDER_DEFERRED_PUT_FILES) {
3743 files = proc->files;
3744 if (files)
3745 proc->files = NULL;
3746 }
3748 if (defer & BINDER_DEFERRED_FLUSH)
3749 binder_deferred_flush(proc);
3751 if (defer & BINDER_DEFERRED_RELEASE)
3752 binder_deferred_release(proc); /* frees proc */
3754 binder_unlock(context, __func__);
3755 if (files)
3756 put_files_struct(files);
3757 } while (proc);
3758 }
3760 static void
3761 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3762 {
3763 mutex_lock(&proc->context->binder_deferred_lock);
3764 proc->deferred_work |= defer;
3765 if (hlist_unhashed(&proc->deferred_work_node)) {
3766 hlist_add_head(&proc->deferred_work_node,
3767 &proc->context->binder_deferred_list);
3768 queue_work(proc->context->binder_deferred_workqueue,
3769 &proc->context->deferred_work);
3771 mutex_unlock(&proc->context->binder_deferred_lock);
3772 }
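/*
 * Everything below is debugfs reporting. The print_binder_*() helpers
 * walk the same rbtrees and work lists as the hot paths above, which
 * is why the _show() functions take the context lock unless the
 * proc_no_lock module parameter is set.
 */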
3774 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3775 struct binder_transaction *t)
3776 {
3777 seq_printf(m,
3778 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3779 prefix, t->debug_id, t,
3780 t->from ? t->from->proc->pid : 0,
3781 t->from ? t->from->pid : 0,
3782 t->to_proc ? t->to_proc->pid : 0,
3783 t->to_thread ? t->to_thread->pid : 0,
3784 t->code, t->flags, t->priority, t->need_reply);
3785 if (t->buffer == NULL) {
3786 seq_puts(m, " buffer free\n");
3789 if (t->buffer->target_node)
3790 seq_printf(m, " node %d",
3791 t->buffer->target_node->debug_id);
3792 seq_printf(m, " size %zd:%zd data %p\n",
3793 t->buffer->data_size, t->buffer->offsets_size,
3794 t->buffer->data);
3795 }
3797 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3798 struct binder_buffer *buffer)
3799 {
3800 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3801 prefix, buffer->debug_id, buffer->data,
3802 buffer->data_size, buffer->offsets_size,
3803 buffer->transaction ? "active" : "delivered");
3806 static void print_binder_work(struct seq_file *m, const char *prefix,
3807 const char *transaction_prefix,
3808 struct binder_work *w)
3810 struct binder_node *node;
3811 struct binder_transaction *t;
3814 case BINDER_WORK_TRANSACTION:
3815 t = container_of(w, struct binder_transaction, work);
3816 print_binder_transaction(m, transaction_prefix, t);
3817 break;
3818 case BINDER_WORK_TRANSACTION_COMPLETE:
3819 seq_printf(m, "%stransaction complete\n", prefix);
3820 break;
3821 case BINDER_WORK_NODE:
3822 node = container_of(w, struct binder_node, work);
3823 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3824 prefix, node->debug_id,
3825 (u64)node->ptr, (u64)node->cookie);
3826 break;
3827 case BINDER_WORK_DEAD_BINDER:
3828 seq_printf(m, "%shas dead binder\n", prefix);
3829 break;
3830 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3831 seq_printf(m, "%shas cleared dead binder\n", prefix);
3832 break;
3833 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3834 seq_printf(m, "%shas cleared death notification\n", prefix);
3835 break;
3836 default:
3837 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3838 break;
3839 }
3840 }
3842 static void print_binder_thread(struct seq_file *m,
3843 struct binder_thread *thread,
3844 int print_always)
3845 {
3846 struct binder_transaction *t;
3847 struct binder_work *w;
3848 size_t start_pos = m->count;
3849 size_t header_pos;
3851 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3852 header_pos = m->count;
3853 t = thread->transaction_stack;
3854 while (t) {
3855 if (t->from == thread) {
3856 print_binder_transaction(m,
3857 " outgoing transaction", t);
3859 } else if (t->to_thread == thread) {
3860 print_binder_transaction(m,
3861 " incoming transaction", t);
3864 print_binder_transaction(m, " bad transaction", t);
3868 list_for_each_entry(w, &thread->todo, entry) {
3869 print_binder_work(m, " ", " pending transaction", w);
3871 if (!print_always && m->count == header_pos)
3872 m->count = start_pos;
3875 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3876 {
3877 struct binder_ref *ref;
3878 struct binder_work *w;
3879 int count;
3881 count = 0;
3882 hlist_for_each_entry(ref, &node->refs, node_entry)
3883 count++;
3885 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3886 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3887 node->has_strong_ref, node->has_weak_ref,
3888 node->local_strong_refs, node->local_weak_refs,
3889 node->internal_strong_refs, count);
3890 if (count) {
3891 seq_puts(m, " proc");
3892 hlist_for_each_entry(ref, &node->refs, node_entry)
3893 seq_printf(m, " %d", ref->proc->pid);
3894 }
3895 seq_puts(m, "\n");
3896 list_for_each_entry(w, &node->async_todo, entry)
3897 print_binder_work(m, " ",
3898 " pending async transaction", w);
3899 }
3901 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3903 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3904 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3905 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3906 }
3908 static void print_binder_proc(struct seq_file *m,
3909 struct binder_proc *proc, int print_all)
3910 {
3911 struct binder_work *w;
3912 struct rb_node *n;
3913 size_t start_pos = m->count;
3914 size_t header_pos;
3916 seq_printf(m, "proc %d\n", proc->pid);
3917 seq_printf(m, "context %s\n", proc->context->name);
3918 header_pos = m->count;
3920 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3921 print_binder_thread(m, rb_entry(n, struct binder_thread,
3922 rb_node), print_all);
3923 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3924 struct binder_node *node = rb_entry(n, struct binder_node,
3925 rb_node);
3926 if (print_all || node->has_async_transaction)
3927 print_binder_node(m, node);
3928 }
3929 if (print_all) {
3930 for (n = rb_first(&proc->refs_by_desc);
3931 n != NULL;
3932 n = rb_next(n))
3933 print_binder_ref(m, rb_entry(n, struct binder_ref,
3934 rb_node_desc));
3935 }
3936 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3937 print_binder_buffer(m, " buffer",
3938 rb_entry(n, struct binder_buffer, rb_node));
3939 list_for_each_entry(w, &proc->todo, entry)
3940 print_binder_work(m, " ", " pending transaction", w);
3941 list_for_each_entry(w, &proc->delivered_death, entry) {
3942 seq_puts(m, " has delivered dead binder\n");
3945 if (!print_all && m->count == header_pos)
3946 m->count = start_pos;
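/*
 * These string tables must stay in the same order as the BC_ and BR_
 * command enums: print_binder_stats() indexes them by _IOC_NR(), and
 * the BUILD_BUG_ON()s below catch any size mismatch.
 */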
3949 static const char * const binder_return_strings[] = {
3950 "BR_ERROR",
3951 "BR_OK",
3952 "BR_TRANSACTION",
3953 "BR_REPLY",
3954 "BR_ACQUIRE_RESULT",
3955 "BR_DEAD_REPLY",
3956 "BR_TRANSACTION_COMPLETE",
3957 "BR_INCREFS",
3958 "BR_ACQUIRE",
3959 "BR_RELEASE",
3960 "BR_DECREFS",
3961 "BR_ATTEMPT_ACQUIRE",
3962 "BR_NOOP",
3963 "BR_SPAWN_LOOPER",
3964 "BR_FINISHED",
3965 "BR_DEAD_BINDER",
3966 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3967 "BR_FAILED_REPLY"
3968 };
3970 static const char * const binder_command_strings[] = {
3971 "BC_TRANSACTION",
3972 "BC_REPLY",
3973 "BC_ACQUIRE_RESULT",
3974 "BC_FREE_BUFFER",
3975 "BC_INCREFS",
3976 "BC_ACQUIRE",
3977 "BC_RELEASE",
3978 "BC_DECREFS",
3979 "BC_INCREFS_DONE",
3980 "BC_ACQUIRE_DONE",
3981 "BC_ATTEMPT_ACQUIRE",
3982 "BC_REGISTER_LOOPER",
3983 "BC_ENTER_LOOPER",
3984 "BC_EXIT_LOOPER",
3985 "BC_REQUEST_DEATH_NOTIFICATION",
3986 "BC_CLEAR_DEATH_NOTIFICATION",
3987 "BC_DEAD_BINDER_DONE",
3988 "BC_TRANSACTION_SG",
3989 "BC_REPLY_SG",
3990 };
3992 static const char * const binder_objstat_strings[] = {
3993 "proc",
3994 "thread",
3995 "node",
3996 "ref",
3997 "death",
3998 "transaction",
3999 "transaction_complete"
4000 };
4002 static void add_binder_stats(struct binder_stats *from, struct binder_stats *to)
4003 {
4004 int i;
4006 for (i = 0; i < ARRAY_SIZE(to->bc); i++)
4007 to->bc[i] += from->bc[i];
4009 for (i = 0; i < ARRAY_SIZE(to->br); i++)
4010 to->br[i] += from->br[i];
4011 }
4013 static void print_binder_stats(struct seq_file *m, const char *prefix,
4014 struct binder_stats *stats,
4015 struct binder_obj_stats *obj_stats)
4016 {
4017 int i;
4019 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
4020 ARRAY_SIZE(binder_command_strings));
4021 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
4022 if (stats->bc[i])
4023 seq_printf(m, "%s%s: %d\n", prefix,
4024 binder_command_strings[i], stats->bc[i]);
4025 }
4027 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
4028 ARRAY_SIZE(binder_return_strings));
4029 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
4030 if (stats->br[i])
4031 seq_printf(m, "%s%s: %d\n", prefix,
4032 binder_return_strings[i], stats->br[i]);
4033 }
4035 if (!obj_stats)
4036 return;
4038 BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
4039 ARRAY_SIZE(binder_objstat_strings));
4040 BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) !=
4041 ARRAY_SIZE(obj_stats->obj_deleted));
4042 for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) {
4043 int obj_created = atomic_read(&obj_stats->obj_created[i]);
4044 int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]);
4046 if (obj_created || obj_deleted)
4047 seq_printf(m, "%s%s: active %d total %d\n", prefix,
4048 binder_objstat_strings[i],
4049 obj_created - obj_deleted, obj_created);
4050 }
4051 }
4053 static void print_binder_proc_stats(struct seq_file *m,
4054 struct binder_proc *proc)
4055 {
4056 struct binder_work *w;
4057 struct rb_node *n;
4058 int count, strong, weak;
4060 seq_printf(m, "proc %d\n", proc->pid);
4061 seq_printf(m, "context %s\n", proc->context->name);
4063 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4065 seq_printf(m, " threads: %d\n", count);
4066 seq_printf(m, " requested threads: %d+%d/%d\n"
4067 " ready threads %d\n"
4068 " free async space %zd\n", proc->requested_threads,
4069 proc->requested_threads_started, proc->max_threads,
4070 proc->ready_threads, proc->free_async_space);
4071 count = 0;
4072 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4073 count++;
4074 seq_printf(m, " nodes: %d\n", count);
4075 count = 0;
4076 strong = 0;
4077 weak = 0;
4078 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4079 struct binder_ref *ref = rb_entry(n, struct binder_ref,
4080 rb_node_desc);
4081 count++;
4082 strong += ref->strong;
4083 weak += ref->weak;
4084 }
4085 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
4087 count = 0;
4088 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
4089 count++;
4090 seq_printf(m, " buffers: %d\n", count);
4092 count = 0;
4093 list_for_each_entry(w, &proc->todo, entry) {
4094 switch (w->type) {
4095 case BINDER_WORK_TRANSACTION:
4096 count++;
4097 break;
4098 default:
4099 break;
4100 }
4101 }
4102 seq_printf(m, " pending transactions: %d\n", count);
4104 print_binder_stats(m, " ", &proc->stats, NULL);
4108 static int binder_state_show(struct seq_file *m, void *unused)
4110 struct binder_device *device;
4111 struct binder_context *context;
4112 struct binder_proc *proc;
4113 struct binder_node *node;
4114 int do_lock = !binder_debug_no_lock;
4115 bool wrote_dead_nodes_header = false;
4117 seq_puts(m, "binder state:\n");
4119 hlist_for_each_entry(device, &binder_devices, hlist) {
4120 context = &device->context;
4121 if (do_lock)
4122 binder_lock(context, __func__);
4123 if (!wrote_dead_nodes_header &&
4124 !hlist_empty(&context->binder_dead_nodes)) {
4125 seq_puts(m, "dead nodes:\n");
4126 wrote_dead_nodes_header = true;
4128 hlist_for_each_entry(node, &context->binder_dead_nodes,
4129 dead_node)
4130 print_binder_node(m, node);
4132 if (do_lock)
4133 binder_unlock(context, __func__);
4134 }
4136 hlist_for_each_entry(device, &binder_devices, hlist) {
4137 context = &device->context;
4138 if (do_lock)
4139 binder_lock(context, __func__);
4141 hlist_for_each_entry(proc, &context->binder_procs, proc_node)
4142 print_binder_proc(m, proc, 1);
4143 if (do_lock)
4144 binder_unlock(context, __func__);
4145 }
4147 return 0;
4148 }
4149 static int binder_stats_show(struct seq_file *m, void *unused)
4151 struct binder_device *device;
4152 struct binder_context *context;
4153 struct binder_proc *proc;
4154 struct binder_stats total_binder_stats;
4155 int do_lock = !binder_debug_no_lock;
4157 memset(&total_binder_stats, 0, sizeof(struct binder_stats));
4159 hlist_for_each_entry(device, &binder_devices, hlist) {
4160 context = &device->context;
4161 if (do_lock)
4162 binder_lock(context, __func__);
4164 add_binder_stats(&context->binder_stats, &total_binder_stats);
4166 if (do_lock)
4167 binder_unlock(context, __func__);
4168 }
4170 seq_puts(m, "binder stats:\n");
4171 print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats);
4173 hlist_for_each_entry(device, &binder_devices, hlist) {
4174 context = &device->context;
4175 if (do_lock)
4176 binder_lock(context, __func__);
4178 hlist_for_each_entry(proc, &context->binder_procs, proc_node)
4179 print_binder_proc_stats(m, proc);
4180 if (do_lock)
4181 binder_unlock(context, __func__);
4182 }
4184 return 0;
4185 }
4186 static int binder_transactions_show(struct seq_file *m, void *unused)
4188 struct binder_device *device;
4189 struct binder_context *context;
4190 struct binder_proc *proc;
4191 int do_lock = !binder_debug_no_lock;
4193 seq_puts(m, "binder transactions:\n");
4194 hlist_for_each_entry(device, &binder_devices, hlist) {
4195 context = &device->context;
4196 if (do_lock)
4197 binder_lock(context, __func__);
4199 hlist_for_each_entry(proc, &context->binder_procs, proc_node)
4200 print_binder_proc(m, proc, 0);
4201 if (do_lock)
4202 binder_unlock(context, __func__);
4203 }
4205 return 0;
4206 }
4207 static int binder_proc_show(struct seq_file *m, void *unused)
4208 {
4209 struct binder_device *device;
4210 struct binder_context *context;
4211 struct binder_proc *itr;
4212 int pid = (unsigned long)m->private;
4213 int do_lock = !binder_debug_no_lock;
4215 hlist_for_each_entry(device, &binder_devices, hlist) {
4216 context = &device->context;
4217 if (do_lock)
4218 binder_lock(context, __func__);
4220 hlist_for_each_entry(itr, &context->binder_procs, proc_node) {
4221 if (itr->pid == pid) {
4222 seq_puts(m, "binder proc state:\n");
4223 print_binder_proc(m, itr, 1);
4224 }
4225 }
4226 if (do_lock)
4227 binder_unlock(context, __func__);
4228 }
4230 return 0;
4231 }
4232 static void print_binder_transaction_log_entry(struct seq_file *m,
4233 struct binder_transaction_log_entry *e)
4234 {
4235 seq_printf(m,
4236 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
4237 e->debug_id, (e->call_type == 2) ? "reply" :
4238 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
4239 e->from_thread, e->to_proc, e->to_thread, e->context_name,
4240 e->to_node, e->target_handle, e->data_size, e->offsets_size);
4241 }
4243 static int print_binder_transaction_log(struct seq_file *m,
4244 struct binder_transaction_log *log)
4245 {
4246 int i;
4247 if (log->full) {
4248 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
4249 print_binder_transaction_log_entry(m, &log->entry[i]);
4250 }
4251 for (i = 0; i < log->next; i++)
4252 print_binder_transaction_log_entry(m, &log->entry[i]);
4253 return 0;
4254 }
4256 static int binder_transaction_log_show(struct seq_file *m, void *unused)
4257 {
4258 struct binder_device *device;
4259 struct binder_context *context;
4261 hlist_for_each_entry(device, &binder_devices, hlist) {
4262 context = &device->context;
4263 print_binder_transaction_log(m, &context->transaction_log);
4264 }
4266 return 0;
4267 }
4268 static int binder_failed_transaction_log_show(struct seq_file *m, void *unused)
4269 {
4270 struct binder_device *device;
4271 struct binder_context *context;
4273 hlist_for_each_entry(device, &binder_devices, hlist) {
4274 context = &device->context;
4275 print_binder_transaction_log(m,
4276 &context->transaction_log_failed);
4277 }
4279 return 0;
4280 }
4281 static const struct file_operations binder_fops = {
4282 .owner = THIS_MODULE,
4283 .poll = binder_poll,
4284 .unlocked_ioctl = binder_ioctl,
4285 .compat_ioctl = binder_ioctl,
4286 .mmap = binder_mmap,
4287 .open = binder_open,
4288 .flush = binder_flush,
4289 .release = binder_release,
4290 };
4292 BINDER_DEBUG_ENTRY(state);
4293 BINDER_DEBUG_ENTRY(stats);
4294 BINDER_DEBUG_ENTRY(transactions);
4295 BINDER_DEBUG_ENTRY(transaction_log);
4296 BINDER_DEBUG_ENTRY(failed_transaction_log);
4298 static void __init free_binder_device(struct binder_device *device)
4299 {
4300 if (device->context.binder_deferred_workqueue)
4301 destroy_workqueue(device->context.binder_deferred_workqueue);
4302 kfree(device);
4303 }
4305 static int __init init_binder_device(const char *name)
4306 {
4307 int ret;
4308 struct binder_device *binder_device;
4309 struct binder_context *context;
4311 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4312 if (!binder_device)
4313 return -ENOMEM;
4315 binder_device->miscdev.fops = &binder_fops;
4316 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4317 binder_device->miscdev.name = name;
4319 context = &binder_device->context;
4320 context->binder_context_mgr_uid = INVALID_UID;
4321 context->name = name;
4323 mutex_init(&context->binder_main_lock);
4324 mutex_init(&context->binder_deferred_lock);
4325 mutex_init(&context->binder_mmap_lock);
4327 context->binder_deferred_workqueue =
4328 create_singlethread_workqueue(name);
4330 if (!context->binder_deferred_workqueue) {
4331 ret = -ENOMEM;
4332 goto err_create_singlethread_workqueue_failed;
4333 }
4335 INIT_HLIST_HEAD(&context->binder_procs);
4336 INIT_HLIST_HEAD(&context->binder_dead_nodes);
4337 INIT_HLIST_HEAD(&context->binder_deferred_list);
4338 INIT_WORK(&context->deferred_work, binder_deferred_func);
4340 ret = misc_register(&binder_device->miscdev);
4341 if (ret < 0)
4342 goto err_misc_register_failed;
4345 hlist_add_head(&binder_device->hlist, &binder_devices);
4346 return ret;
4348 err_create_singlethread_workqueue_failed:
4349 err_misc_register_failed:
4350 free_binder_device(binder_device);
4352 return ret;
4353 }
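/*
 * binder_init() turns the comma-separated "devices" module parameter
 * into one misc device per name, each with its own isolated
 * binder_context (so, for example, a value such as "binder,hwbinder"
 * would create /dev/binder and /dev/hwbinder).
 */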
4355 static int __init binder_init(void)
4356 {
4357 int ret;
4358 char *device_name, *device_names;
4359 struct binder_device *device;
4360 struct hlist_node *tmp;
4362 /*
4363 * Copy the module_parameter string, because we don't want to
4364 * tokenize it in-place.
4365 */
4366 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4367 if (!device_names)
4368 return -ENOMEM;
4370 strcpy(device_names, binder_devices_param);
4372 while ((device_name = strsep(&device_names, ","))) {
4373 ret = init_binder_device(device_name);
4374 if (ret)
4375 goto err_init_binder_device_failed;
4376 }
4378 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4379 if (binder_debugfs_dir_entry_root)
4380 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4381 binder_debugfs_dir_entry_root);
4383 if (binder_debugfs_dir_entry_root) {
4384 debugfs_create_file("state",
4386 binder_debugfs_dir_entry_root,
4388 &binder_state_fops);
4389 debugfs_create_file("stats",
4391 binder_debugfs_dir_entry_root,
4393 &binder_stats_fops);
4394 debugfs_create_file("transactions",
4396 binder_debugfs_dir_entry_root,
4398 &binder_transactions_fops);
4399 debugfs_create_file("transaction_log",
4401 binder_debugfs_dir_entry_root,
4403 &binder_transaction_log_fops);
4404 debugfs_create_file("failed_transaction_log",
4406 binder_debugfs_dir_entry_root,
4408 &binder_failed_transaction_log_fops);
4413 err_init_binder_device_failed:
4414 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4415 misc_deregister(&device->miscdev);
4416 hlist_del(&device->hlist);
4417 free_binder_device(device);
4418 }
4420 return ret;
4421 }
4423 device_initcall(binder_init);
4425 #define CREATE_TRACE_POINTS
4426 #include "binder_trace.h"
4428 MODULE_LICENSE("GPL v2");