// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required as a suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
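/*
 * For example, a path that needs all three locks for a proc and one of
 * its nodes must take them in the order above (illustrative sketch only,
 * not a call sequence taken verbatim from this file):
 *
 *	binder_proc_lock(proc);		<- 1) proc->outer_lock
 *	binder_node_lock(node);		<- 2) node->lock
 *	binder_inner_proc_lock(proc);	<- 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */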

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
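
/*
 * Illustrative example (assuming the driver is built into the kernel):
 * both parameters can be set on the kernel command line, e.g.
 *
 *	binder.debug_mask=0x7 binder.devices=binder,hwbinder,vndbinder
 *
 * which enables the first three debug classes and creates device nodes
 * named binder, hwbinder and vndbinder.
 */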

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			   struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
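
/*
 * Illustrative sketch of the typical pattern when the caller already
 * holds the inner lock and must also wake the target (t and oneway are
 * hypothetical locals):
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	binder_wakeup_thread_ilocked(proc, thread, !oneway);
 *	binder_inner_proc_unlock(proc);
 */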

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
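
/*
 * Lookup helpers such as binder_get_node() return the node with an extra
 * tmp_ref held, so a typical caller pairs the lookup with binder_put_node()
 * (illustrative sketch only):
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */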

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
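
/*
 * Illustrative sketch of the expected calling pattern: the caller drops
 * both the inner lock and the temporary thread reference when done.
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... deliver the reply ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */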

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup at @fixup_offset in buffer @b is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in this buffer */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to put when the task work runs
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - put the file backing a deferred fd close
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the final fput() on the file that
 * backed the closed file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
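
/*
 * Illustrative call sequence (the fd number is hypothetical): when a
 * transaction that carried fd 7 is torn down in binder_ioctl() context,
 *
 *	binder_deferred_fd_close(7);
 *		close_fd_get_file()	detaches fd 7 from the fd table
 *		filp_close()		flushes the file
 *		task_work_add()		queues binder_do_fd_close()
 *	... binder_ioctl() returns toward user space ...
 *	binder_do_fd_close()		drops the last reference via fput()
 *
 * so the final fput() never runs while the thread is still inside the
 * ioctl that may itself rely on an fdget() reference.
 */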

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure && failed_at ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				 debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset:		offset in target buffer
 * @sender_uaddr:	user address in source buffer
 * @length:		bytes to copy
 * @node:		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *tmppf;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		BUG_ON(pf->skip_size == 0);
		list_del(&pf->node);
		kfree(pf);
	}
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}
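
/*
 * Worked example (all offsets are made up): one scatter-gather block of
 * 64 bytes at target offset 0 with a single 8-byte pointer fixup at
 * offset 24,
 *
 *	sgc: { .offset = 0,  .length = 64 }
 *	pf:  { .offset = 24, .fixup_data = <translated pointer>, .skip_size = 0 }
 *
 * is processed as: copy sender bytes 0..23, write the 8 bytes of
 * fixup_data at offset 24 instead of the sender's value, then resume
 * copying the sender data for bytes 32..63.  A fixup with a non-zero
 * skip_size (the BINDER_TYPE_FDA case) skips that window instead, and
 * the fd values are patched later in target context.
 */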

/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}
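
/*
 * Example (hypothetical offsets): fixups queued at offsets 8, 40 and
 * then 24 end up on the list as 8, 24, 40.  The out-of-order insert of
 * 24 walks backwards from the tail, passes 40, finds 8 < 24 and links
 * the new element after it, so binder_do_deferred_txn_copies() always
 * consumes the fixups in increasing @offset order.
 */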

static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	const void __user *sender_ufda_base;
	struct binder_proc *proc = thread->proc;
	int ret;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
				fda->parent_offset;

	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}
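
/*
 * Illustrative sender layout (all values are made up): a parent
 * BINDER_TYPE_PTR object whose 32-byte buffer carries three fds starting
 * at byte 16, followed by the BINDER_TYPE_FDA object describing them:
 *
 *	objects[0] = (struct binder_buffer_object){
 *		.hdr.type = BINDER_TYPE_PTR,
 *		.buffer = <sender address>, .length = 32,
 *	};
 *	objects[1] = (struct binder_fd_array_object){
 *		.hdr.type = BINDER_TYPE_FDA,
 *		.num_fds = 3,
 *		.parent = 0,		(index of objects[0])
 *		.parent_offset = 16,	(fds occupy bytes 16..27)
 *	};
 *
 * binder_translate_fd_array() reads the three u32 fds from the sender
 * copy of objects[0]'s buffer, translates each one via
 * binder_translate_fd(), and queues a single skip-type fixup so the fd
 * slots in the target buffer are only written once the new fds have
 * been allocated in the target process.
 */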

static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen
 */
static int binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((proc->is_frozen && !oneway) || proc->is_dead ||
			(thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return 0;
}
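
/*
 * Queueing sketch for the cases handled above (a summary, not new
 * behaviour):
 *
 *	reply / explicit @thread		-> thread->todo
 *	sync transaction, no @thread		-> an idle waiting thread if
 *						   available, else proc->todo
 *	oneway, node currently idle		-> proc->todo (or a waiting
 *						   thread); has_async_transaction
 *						   is set
 *	oneway, node busy (pending_async)	-> node->async_todo, and no
 *						   wakeup until the previous
 *						   async buffer is freed
 */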

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:         struct binder_node for which to get refs
 * @procp:        returns @node->proc if valid
 * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	binder_size_t user_offset = 0;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;
	struct list_head sgc_head;
	struct list_head pf_head;
	const void __user *user_buffer = (const void __user *)
				(uintptr_t)tr->data.ptr.buffer;
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (WARN_ON(proc == target_proc)) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (security_binder_transaction(proc->cred,
						target_proc->cred) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_cred_getsecid(proc->cred, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY), current->tgid);
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		binder_size_t copy_size;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
		copy_size = object_offset - user_offset;
		if (copy_size && (user_offset > object_offset ||
				binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					copy_size))) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		object_size = binder_get_object(target_proc, user_buffer,
				t->buffer, object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
		user_offset = object_offset + object_size;

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_object user_object;
			size_t user_parent_size;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			/*
			 * We need to read the user version of the parent
			 * object to get the original user offset
			 */
			user_parent_size =
				binder_get_object(proc, user_buffer, t->buffer,
						  parent_offset, &user_object);
			if (user_parent_size != sizeof(user_object.bbo)) {
				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
						  proc->pid, thread->pid,
						  user_parent_size,
						  sizeof(user_object.bbo));
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(&pf_head, fda,
							user_buffer, parent,
							&user_object.bbo, t,
							thread, in_reply_to);
			if (!ret)
				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
								  t->buffer,
								  object_offset,
								  fda, sizeof(*fda));
			if (ret) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret > 0 ? -EINVAL : ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
			if (ret) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(&pf_head, t,
						  thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* Done processing objects, copy the rest of the buffer */
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, user_offset,
				user_buffer + user_offset,
				tr->data_size - user_offset)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}

	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
					    &sgc_head, &pf_head);
	if (ret) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = ret;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		if (return_error)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

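/*
 * Error unwinding: each label below releases only the resources that
 * were acquired after the previous label, in reverse order of
 * acquisition.
 */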
err_dead_proc_or_thread:
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
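			/*
			 * binder_free_buf() also dispatches the next queued
			 * async transaction for the node if this buffer
			 * belonged to an async transaction.
			 */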
			binder_free_buf(proc, thread, buffer, false);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
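					/*
					 * The node's owner is already dead:
					 * deliver the death notification
					 * right away.
					 */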
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
					(BINDER_LOOPER_STATE_REGISTERED |
					 BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

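/*
 * Write a single BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS command,
 * followed by the node's ptr and cookie, to the user read buffer at
 * *ptrp, advancing *ptrp on success.
 */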
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

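/*
 * Block until this thread has work to do. If the thread may also handle
 * process-wide work (do_proc_work), it is added to proc->waiting_threads
 * while it sleeps so it can be picked to service proc->todo.
 */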
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fix up the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
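			/*
			 * Compute the node's desired strong/weak state and
			 * compare it with what userspace last saw; any
			 * transition is reported below as BR_INCREFS,
			 * BR_ACQUIRE, BR_RELEASE or BR_DECREFS.
			 */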
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}

}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

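	/*
	 * Walk this thread's transaction stack and detach the thread from
	 * every in-flight transaction, so neither side will reference it
	 * once it is freed.
	 */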
	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed).  Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
		context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

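/**
 * binder_txns_pending_ilocked() - check for outstanding transactions
 * @proc:	binder proc to check; proc->inner_lock must be held
 *
 * Returns true if the process has outstanding transactions or any of its
 * threads still has a transaction stack, which the freeze path uses to
 * decide whether the process has fully drained.
 */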
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

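/**
 * binder_ioctl_freeze() - freeze or unfreeze one binder_proc
 * @info:	freeze request (enable flag and optional timeout_ms)
 * @target_proc:	proc to act on; the caller holds a tmp reference
 *
 * With @info->enable clear, the frozen state and the sync/async receive
 * flags are simply reset. Otherwise the proc is marked frozen so that new
 * transactions are rejected; if a timeout was given, we wait up to that
 * long for outstanding transactions to drain. -EAGAIN is returned while
 * transactions are still pending, and the frozen state is rolled back on
 * any error.
 *
 * Illustrative userspace call (not part of the driver):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid, .enable = 1, .timeout_ms = 100,
 *	};
 *	ioctl(binder_fd, BINDER_FREEZE, &info);
 */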
static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting the frozen state. If a timeout is specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

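/**
 * binder_ioctl_get_freezer_info() - handle BINDER_GET_FROZEN_INFO
 * @info:	in/out buffer; @info->pid selects the target process
 *
 * ORs together the sync_recv and async_recv state of every binder_proc
 * whose pid matches @info->pid (one pid may have a proc per binder
 * context). Bit 1 of @info->sync_recv additionally reports whether
 * transactions are still pending on any of those procs.
 *
 * Return: 0 on success, -EINVAL if no matching process was found.
 */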
static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}

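/*
 * binder_ioctl() - top-level ioctl dispatcher for binder devices.
 *
 * Waits until binder_stop_on_user_error drops below 2, looks up (creating
 * it if needed) the binder_thread for the calling task and then dispatches
 * on @cmd. On the way out, looper_need_return is cleared on the thread and
 * any failure other than -EINTR is logged.
 */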
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

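/*
 * binder_mmap() - map the binder buffer space into userspace.
 *
 * Only the process that opened the device may map it, and the mapping must
 * not be writable (FORBIDDEN_MMAP_FLAGS). VM_DONTCOPY and VM_MIXEDMAP are
 * forced, VM_MAYWRITE is cleared, and the vma is handed over to
 * binder_alloc_mmap_handler() which sets up the transaction buffer area.
 */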
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming.
		 * The printing code will anyway print all contexts for a given
		 * PID, so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process-specific log file is shared
		 * between contexts. Only create it for the first PID.
		 * This is OK since, as with debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

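/*
 * binder_node_release() - clean up a node whose owning proc is going away.
 *
 * Flushes the node's pending async work. If nothing but the caller's
 * temporary reference is left, the node is freed immediately; otherwise it
 * is moved onto the global binder_dead_nodes list and a
 * BINDER_WORK_DEAD_BINDER item is queued for every ref that registered a
 * death notification. Returns @refs incremented by the number of refs
 * still pointing at the node.
 */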
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

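/*
 * binder_deferred_release() - deferred half of binder_release().
 *
 * Runs from the binder_deferred_work workqueue. Unhashes the proc, clears
 * the context manager node if this proc owned it, marks the proc dead and
 * unfrozen, releases every thread, node and ref it still holds, drains the
 * remaining todo/delivered_death work and finally drops the temporary
 * reference it took on the proc.
 */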
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * Take a temporary ref on the node before calling
		 * binder_node_release(), which will either kfree() the
		 * node or call binder_put_node().
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

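/*
 * print_binder_proc() - dump the state of one binder_proc to debugfs.
 *
 * Prints the proc's threads, its nodes (all of them when @print_all,
 * otherwise only nodes with pending async work), its refs (only when
 * @print_all), the allocated buffers and any work still queued on the
 * proc. If @print_all is false and nothing beyond the header was emitted,
 * the output is discarded by rewinding m->count.
 */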
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
5857 5858 5859
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				prefix,
				binder_objstat_strings[i],
				created - deleted,
				created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}


int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

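/*
 * init_binder_device() - register one binder misc device.
 *
 * Allocates a binder_device for @name, registers it as a dynamic-minor
 * misc character device and links it into the global binder_devices list.
 * Called at init time for each name taken from binder_devices_param.
 */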
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");