Commit 34e5e709 authored by Simon Fels

Add binder and ashmem kernel drivers as out-of-tree modules

Based on code from https://github.com/hungys/binder-for-linux
Parent 3d05c50c
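For context, a minimal sketch of building and loading the two modules with the Makefiles below (the ashmem/ and binder/ directory names and the /dev/binder device name are assumptions; gen_deps.sh needs root because /proc/kallsyms hides symbol addresses from unprivileged users):

make -C ashmem
sudo insmod ashmem/ashmem_linux.ko
make -C binder
sudo insmod binder/binder_linux.ko
ls -l /dev/ashmem /dev/binder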
ccflags-y += -I$(src) -Wno-error=implicit-int
ifneq ($(KERNELRELEASE),)
obj-m := ashmem_linux.o
ashmem_linux-y := deps.o ashmem.o
$(obj)/deps.o: $(src)/deps.h
$(obj)/deps.h: $(src)/gen_deps.sh
sudo $(src)/gen_deps.sh > $@
else
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
all:
$(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD
clean:
rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions
endif
/* mm/ashmem.c
*
* Anonymous Shared Memory Subsystem, ashmem
*
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "ashmem: " fmt
#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/**
* struct ashmem_area - The anonymous shared memory area
* @name: The optional name in /proc/pid/maps
* @unpinned_list: The list of unpinned page ranges in this area
* @file: The shmem-based backing file
* @size: The size of the mapping, in bytes
* @prot_mask: The allowed protection bits, as vm_flags
*
* The lifecycle of this structure is from our parent file's open() until
* its release(). It is also protected by 'ashmem_mutex'
*
* Warning: Mappings do NOT pin this structure; It dies on close()
*/
struct ashmem_area {
char name[ASHMEM_FULL_NAME_LEN];
struct list_head unpinned_list;
struct file *file;
size_t size;
unsigned long prot_mask;
};
/**
* struct ashmem_range - A range of unpinned/evictable pages
* @lru: The entry in the LRU list
* @unpinned: The entry in its area's unpinned list
* @asma: The associated anonymous shared memory area.
* @pgstart: The starting page (inclusive)
* @pgend: The ending page (inclusive)
* @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
*
* The lifecycle of this structure is from unpin to pin.
* It is protected by 'ashmem_mutex'
*/
struct ashmem_range {
struct list_head lru;
struct list_head unpinned;
struct ashmem_area *asma;
size_t pgstart;
size_t pgend;
unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
/*
* unsigned long lru_count - The count of pages on our LRU list.
*
* This is protected by ashmem_mutex.
*/
static unsigned long lru_count;
/*
* ashmem_mutex - protects the list of and each individual ashmem_area
*
* Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
*/
static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
((range)->pgend - (range)->pgstart + 1)
#define range_on_lru(range) \
((range)->purged == ASHMEM_NOT_PURGED)
#define page_range_subsumes_range(range, start, end) \
(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
#define page_range_subsumed_by_range(range, start, end) \
(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
#define page_in_range(range, page) \
(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
#define page_range_in_range(range, start, end) \
(page_in_range(range, start) || page_in_range(range, end) || \
page_range_subsumes_range(range, start, end))
#define range_before_page(range, page) \
((range)->pgend < (page))
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
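/*
 * Worked example (illustrative, not in the original source): for a
 * range covering pages [2, 5], page_in_range() is true for pages
 * 2..5; range_before_page(range, 6) is true; and a request for pages
 * [4, 9] satisfies page_range_in_range() because page 4 lies inside
 * the range. All bounds are inclusive page indices.
 */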
/**
* lru_add() - Adds a range of memory to the LRU list
* @range: The memory range being added.
*
* The range is first added to the end (tail) of the LRU list.
* After this, the size of the range is added to @lru_count
*/
static inline void lru_add(struct ashmem_range *range)
{
list_add_tail(&range->lru, &ashmem_lru_list);
lru_count += range_size(range);
}
/**
* lru_del() - Removes a range of memory from the LRU list
* @range: The memory range being removed
*
* The range is first deleted from the LRU list.
* After this, the size of the range is removed from @lru_count
*/
static inline void lru_del(struct ashmem_range *range)
{
list_del(&range->lru);
lru_count -= range_size(range);
}
/**
* range_alloc() - Allocates and initializes a new ashmem_range structure
* @asma: The associated ashmem_area
* @prev_range: The previous ashmem_range in the sorted asma->unpinned list
* @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
* @start: The starting page (inclusive)
* @end: The ending page (inclusive)
*
* This function is protected by ashmem_mutex.
*
* Return: 0 if successful, or -ENOMEM if there is an error
*/
static int range_alloc(struct ashmem_area *asma,
struct ashmem_range *prev_range, unsigned int purged,
size_t start, size_t end)
{
struct ashmem_range *range;
range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
if (unlikely(!range))
return -ENOMEM;
range->asma = asma;
range->pgstart = start;
range->pgend = end;
range->purged = purged;
list_add_tail(&range->unpinned, &prev_range->unpinned);
if (range_on_lru(range))
lru_add(range);
return 0;
}
/**
* range_del() - Deletes and deallocates an ashmem_range structure
* @range: The associated ashmem_range that has previously been allocated
*/
static void range_del(struct ashmem_range *range)
{
list_del(&range->unpinned);
if (range_on_lru(range))
lru_del(range);
kmem_cache_free(ashmem_range_cachep, range);
}
/**
* range_shrink() - Shrinks an ashmem_range
* @range: The associated ashmem_range being shrunk
* @start: The starting page (inclusive) of the new range
* @end: The ending page (inclusive) of the new range
*
* This does not modify the data inside the existing range in any way - It
* simply shrinks the boundaries of the range.
*
* Theoretically, with a little tweaking, this could eventually be changed
* to range_resize, and expand the lru_count if the new range is larger.
*/
static inline void range_shrink(struct ashmem_range *range,
size_t start, size_t end)
{
size_t pre = range_size(range);
range->pgstart = start;
range->pgend = end;
if (range_on_lru(range))
lru_count -= pre - range_size(range);
}
/**
* ashmem_open() - Opens an Anonymous Shared Memory structure
* @inode: The backing file's inode
* @file: The backing file
*
* Please note that the ashmem_area is not returned by this function - It is
* instead written to "file->private_data".
*
* Return: 0 if successful, or another code if unsuccessful.
*/
static int ashmem_open(struct inode *inode, struct file *file)
{
struct ashmem_area *asma;
int ret;
ret = generic_file_open(inode, file);
if (unlikely(ret))
return ret;
asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
if (unlikely(!asma))
return -ENOMEM;
INIT_LIST_HEAD(&asma->unpinned_list);
memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
asma->prot_mask = PROT_MASK;
file->private_data = asma;
return 0;
}
/**
* ashmem_release() - Releases an Anonymous Shared Memory structure
* @ignored: The backing file's inode; it is ignored here.
* @file: The backing file
*
* Return: 0 if successful. If it is anything else, go have a coffee and
* try again.
*/
static int ashmem_release(struct inode *ignored, struct file *file)
{
struct ashmem_area *asma = file->private_data;
struct ashmem_range *range, *next;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
range_del(range);
mutex_unlock(&ashmem_mutex);
if (asma->file)
fput(asma->file);
kmem_cache_free(ashmem_area_cachep, asma);
return 0;
}
/**
* ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
* @file: The associated backing file.
* @buf: The userspace buffer the data is written to
* @len: The number of bytes being read
* @pos: The position of the first byte to read.
*
* Return: The number of bytes read (0 at EOF), or a negative error code.
*/
static ssize_t ashmem_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct ashmem_area *asma = file->private_data;
int ret = 0;
mutex_lock(&ashmem_mutex);
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
goto out_unlock;
if (!asma->file) {
ret = -EBADF;
goto out_unlock;
}
mutex_unlock(&ashmem_mutex);
/*
* asma and asma->file are used outside the lock here. We assume
* once asma->file is set it will never be changed, and will not
* be destroyed until all references to the file are dropped and
* ashmem_release is called.
*/
ret = __vfs_read(asma->file, buf, len, pos);
if (ret >= 0)
/* Update backing file pos, since f_ops->read() doesn't */
asma->file->f_pos = *pos;
return ret;
out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
struct ashmem_area *asma = file->private_data;
int ret;
mutex_lock(&ashmem_mutex);
if (asma->size == 0) {
ret = -EINVAL;
goto out;
}
if (!asma->file) {
ret = -EBADF;
goto out;
}
ret = vfs_llseek(asma->file, offset, origin);
if (ret < 0)
goto out;
/* Copy f_pos from backing file, since f_ops->llseek() sets it */
file->f_pos = asma->file->f_pos;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
_calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
_calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct ashmem_area *asma = file->private_data;
int ret = 0;
mutex_lock(&ashmem_mutex);
/* user needs to SET_SIZE before mapping */
if (unlikely(!asma->size)) {
ret = -EINVAL;
goto out;
}
/* requested protection bits must match our allowed protection mask */
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
calc_vm_prot_bits(PROT_MASK))) {
ret = -EPERM;
goto out;
}
vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
if (!asma->file) {
char *name = ASHMEM_NAME_DEF;
struct file *vmfile;
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
name = asma->name;
/* ... and allocate the backing shmem file */
vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
if (IS_ERR(vmfile)) {
ret = PTR_ERR(vmfile);
goto out;
}
asma->file = vmfile;
}
get_file(asma->file);
/*
* XXX - Reworked to use shmem_zero_setup() instead of
* shmem_set_file while we're in staging. -jstultz
*/
if (vma->vm_flags & VM_SHARED) {
ret = shmem_zero_setup(vma);
if (ret) {
fput(asma->file);
goto out;
}
}
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = asma->file;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
/*
* ashmem_shrink_scan() - our cache shrinker, called from mm/vmscan.c
*
* 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
* Return value is the number of pages freed, or SHRINK_STOP if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
vfs_fallocate(range->asma->file,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
freed += range_size(range);
if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
return freed;
}
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
/*
* note that lru_count is count of pages on the lru, not a count of
* objects on the list. This means the scan function needs to return the
* number of pages freed, not the number of objects scanned.
*/
return lru_count;
}
static struct shrinker ashmem_shrinker = {
.count_objects = ashmem_shrink_count,
.scan_objects = ashmem_shrink_scan,
/*
* XXX (dchinner): I wish people would comment on why they need
* significant changes to the default value here
*/
.seeks = DEFAULT_SEEKS * 4,
};
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
int ret = 0;
mutex_lock(&ashmem_mutex);
/* the user can only remove, not add, protection bits */
if (unlikely((asma->prot_mask & prot) != prot)) {
ret = -EINVAL;
goto out;
}
/* does the application expect PROT_READ to imply PROT_EXEC? */
if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
prot |= PROT_EXEC;
asma->prot_mask = prot;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
static int set_name(struct ashmem_area *asma, void __user *name)
{
int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
/*
* Holding ashmem_mutex while doing a copy_from_user might cause
* a data abort, which would try to take mmap_sem. If another
* thread has invoked ashmem_mmap, it will be holding the
* semaphore and waiting for ashmem_mutex, thereby leading to
* deadlock. So instead, copy the name into a local variable that
* needs no locking, and only then copy it to the structure member
* with the lock held.
*/
len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
if (len < 0)
return len;
if (len == ASHMEM_NAME_LEN)
local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
if (unlikely(asma->file))
ret = -EINVAL;
else
strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
mutex_unlock(&ashmem_mutex);
return ret;
}
static int get_name(struct ashmem_area *asma, void __user *name)
{
int ret = 0;
size_t len;
/*
* Have a local variable to which we'll copy the content
* from asma with the lock held. Later we can copy this to the user
* space safely without holding any locks. So even if we proceed to
* wait for mmap_sem, it won't lead to deadlock.
*/
char local_name[ASHMEM_NAME_LEN];
mutex_lock(&ashmem_mutex);
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
/*
* Copying only `len', instead of ASHMEM_NAME_LEN, bytes
* prevents us from revealing one user's stack to another.
*/
len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
} else {
len = sizeof(ASHMEM_NAME_DEF);
memcpy(local_name, ASHMEM_NAME_DEF, len);
}
mutex_unlock(&ashmem_mutex);
/*
* Now we are just copying from the stack variable to userland
* No lock held
*/
if (unlikely(copy_to_user(name, local_name, len)))
ret = -EFAULT;
return ret;
}
/*
* ashmem_pin - pin the given ashmem region, returning whether it was
* previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
*
* Caller must hold ashmem_mutex.
*/
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
struct ashmem_range *range, *next;
int ret = ASHMEM_NOT_PURGED;
list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
/* moved past last applicable page; we can short circuit */
if (range_before_page(range, pgstart))
break;
/*
* The user can ask us to pin pages that span multiple ranges,
* or to pin pages that aren't even unpinned, so this is messy.
*
* Four cases:
* 1. The requested range subsumes an existing range, so we
* just remove the entire matching range.
* 2. The requested range overlaps the start of an existing
* range, so we just update that range.
* 3. The requested range overlaps the end of an existing
* range, so we just update that range.
* 4. The requested range punches a hole in an existing range,
* so we have to update one side of the range and then
* create a new range for the other side.
*/
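		/*
		 * Worked example (illustrative, not in the original
		 * source): with a single unpinned range covering pages
		 * [2, 9], pinning pages [4, 5] is case #4: the existing
		 * range is shrunk to [2, 3] and a new range [6, 9] is
		 * allocated with the same purge status.
		 */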
if (page_range_in_range(range, pgstart, pgend)) {
ret |= range->purged;
/* Case #1: Easy. Just nuke the whole thing. */
if (page_range_subsumes_range(range, pgstart, pgend)) {
range_del(range);
continue;
}
/* Case #2: We overlap from the start, so adjust it */
if (range->pgstart >= pgstart) {
range_shrink(range, pgend + 1, range->pgend);
continue;
}
/* Case #3: We overlap from the rear, so adjust it */
if (range->pgend <= pgend) {
range_shrink(range, range->pgstart,
pgstart - 1);
continue;
}
/*
* Case #4: We eat a chunk out of the middle. A bit
* more complicated, we allocate a new range for the
* second half and adjust the first chunk's endpoint.
*/
range_alloc(asma, range, range->purged,
pgend + 1, range->pgend);
range_shrink(range, range->pgstart, pgstart - 1);
break;
}
}
return ret;
}
/*
* ashmem_unpin - unpin the given range of pages. Returns zero on success.
*
* Caller must hold ashmem_mutex.
*/
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
struct ashmem_range *range, *next;
unsigned int purged = ASHMEM_NOT_PURGED;
restart:
list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
/* short circuit: this is our insertion point */
if (range_before_page(range, pgstart))
break;
/*
* The user can ask us to unpin pages that are already entirely
* or partially pinned. We handle those two cases here.
*/
if (page_range_subsumed_by_range(range, pgstart, pgend))
return 0;
if (page_range_in_range(range, pgstart, pgend)) {
pgstart = min_t(size_t, range->pgstart, pgstart);
pgend = max_t(size_t, range->pgend, pgend);
purged |= range->purged;
range_del(range);
goto restart;
}
}
return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
* ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
* given interval are unpinned and ASHMEM_IS_PINNED otherwise.
*
* Caller must hold ashmem_mutex.
*/
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
size_t pgend)
{
struct ashmem_range *range;
int ret = ASHMEM_IS_PINNED;
list_for_each_entry(range, &asma->unpinned_list, unpinned) {
if (range_before_page(range, pgstart))
break;
if (page_range_in_range(range, pgstart, pgend)) {
ret = ASHMEM_IS_UNPINNED;
break;
}
}
return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
void __user *p)
{
struct ashmem_pin pin;
size_t pgstart, pgend;
int ret = -EINVAL;
if (unlikely(!asma->file))
return -EINVAL;
if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
return -EFAULT;
/* by convention, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;
if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
return -EINVAL;
if (unlikely(((__u32)-1) - pin.offset < pin.len))
return -EINVAL;
if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
return -EINVAL;
pgstart = pin.offset / PAGE_SIZE;
pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
mutex_lock(&ashmem_mutex);
switch (cmd) {
case ASHMEM_PIN:
ret = ashmem_pin(asma, pgstart, pgend);
break;
case ASHMEM_UNPIN:
ret = ashmem_unpin(asma, pgstart, pgend);
break;
case ASHMEM_GET_PIN_STATUS:
ret = ashmem_get_pin_status(asma, pgstart, pgend);
break;
}
mutex_unlock(&ashmem_mutex);
return ret;
}
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ashmem_area *asma = file->private_data;
long ret = -ENOTTY;
switch (cmd) {
case ASHMEM_SET_NAME:
ret = set_name(asma, (void __user *)arg);
break;
case ASHMEM_GET_NAME:
ret = get_name(asma, (void __user *)arg);
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
break;
case ASHMEM_SET_PROT_MASK:
ret = set_prot_mask(asma, arg);
break;
case ASHMEM_GET_PROT_MASK:
ret = asma->prot_mask;
break;
case ASHMEM_PIN:
case ASHMEM_UNPIN:
case ASHMEM_GET_PIN_STATUS:
ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
break;
case ASHMEM_PURGE_ALL_CACHES:
ret = -EPERM;
if (capable(CAP_SYS_ADMIN)) {
struct shrink_control sc = {
.gfp_mask = GFP_KERNEL,
.nr_to_scan = LONG_MAX,
};
ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
}
return ret;
}
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case COMPAT_ASHMEM_SET_SIZE:
cmd = ASHMEM_SET_SIZE;
break;
case COMPAT_ASHMEM_SET_PROT_MASK:
cmd = ASHMEM_SET_PROT_MASK;
break;
}
return ashmem_ioctl(file, cmd, arg);
}
#endif
static const struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
.release = ashmem_release,
.read = ashmem_read,
.llseek = ashmem_llseek,
.mmap = ashmem_mmap,
.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ashmem_ioctl,
#endif
};
static struct miscdevice ashmem_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ashmem",
.fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
int ret;
ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
sizeof(struct ashmem_area),
0, 0, NULL);
if (unlikely(!ashmem_area_cachep)) {
pr_err("failed to create slab cache\n");
return -ENOMEM;
}
ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
sizeof(struct ashmem_range),
0, 0, NULL);
if (unlikely(!ashmem_range_cachep)) {
pr_err("failed to create slab cache\n");
return -ENOMEM;
}
ret = misc_register(&ashmem_misc);
if (unlikely(ret)) {
pr_err("failed to register misc device!\n");
return ret;
}
register_shrinker(&ashmem_shrinker);
pr_info("initialized\n");
return 0;
}
device_initcall(ashmem_init);
/*
* include/linux/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
*
* This file is dual licensed. It may be redistributed and/or modified
* under the terms of the Apache 2.0 License OR version 2 of the GNU
* General Public License.
*/
#ifndef _LINUX_ASHMEM_H
#define _LINUX_ASHMEM_H
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <linux/compat.h>
#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
#define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t)
#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int)
#endif
#endif /* _LINUX_ASHMEM_H */
#include <linux/mm.h>
#include "deps.h"
static int (*shmem_zero_setup_ptr)(struct vm_area_struct *) = SHMEM_ZERO_SETUP;
int shmem_zero_setup(struct vm_area_struct *vma)
{
return shmem_zero_setup_ptr(vma);
}
#!/bin/sh
SYMS="shmem_zero_setup"
for sym in $SYMS; do
addr=$(grep -E "^[0-9a-f]+ T $sym\$" /proc/kallsyms | sed -e 's/\s.*$//')
if [ -z "$addr" ]; then
echo "Error: can't find symbol $sym" >&2
exit 1
fi
name=$(echo "$sym" | tr '[:lower:]' '[:upper:]')
# printf interprets \t portably; echo "...\t..." does not on all shells
printf '#define %s\t(void *)0x%s\n' "$name" "$addr"
done
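For reference, the generated deps.h holds one #define per symbol in SYMS. A hypothetical run (the address is illustrative; it differs per kernel build and, with KASLR, per boot) would produce:

#define SHMEM_ZERO_SETUP	(void *)0xffffffff8126c840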
/*
* drivers/staging/android/uapi/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
*
* This file is dual licensed. It may be redistributed and/or modified
* under the terms of the Apache 2.0 License OR version 2 of the GNU
* General Public License.
*/
#ifndef _UAPI_LINUX_ASHMEM_H
#define _UAPI_LINUX_ASHMEM_H
#include <linux/ioctl.h>
#define ASHMEM_NAME_LEN 256
#define ASHMEM_NAME_DEF "dev/ashmem"
/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
#define ASHMEM_NOT_PURGED 0
#define ASHMEM_WAS_PURGED 1
/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
#define ASHMEM_IS_UNPINNED 0
#define ASHMEM_IS_PINNED 1
struct ashmem_pin {
__u32 offset; /* offset into region, in bytes, page-aligned */
__u32 len; /* length forward from offset, in bytes, page-aligned */
};
#define __ASHMEMIOC 0x77
#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
#endif /* _UAPI_LINUX_ASHMEM_H */
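To illustrate the ioctl interface above, here is a minimal userspace sketch; it is not part of this commit, error handling is elided, and the 4096-byte size assumes PAGE_SIZE == 4096:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ashmem.h" /* the UAPI header above */

int main(void)
{
	int fd = open("/dev/ashmem", O_RDWR);
	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
	char *p;

	ioctl(fd, ASHMEM_SET_NAME, "example"); /* must precede mmap */
	ioctl(fd, ASHMEM_SET_SIZE, 4096);      /* must precede mmap */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	strcpy(p, "hello");
	ioctl(fd, ASHMEM_UNPIN, &pin);         /* pages now reclaimable */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		printf("contents were purged while unpinned\n");
	munmap(p, 4096);
	close(fd);
	return 0;
}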
ccflags-y += -I$(src)
ifneq ($(KERNELRELEASE),)
obj-m := binder_linux.o
binder_linux-y := deps.o binder.o
$(obj)/deps.o: $(src)/deps.h
$(obj)/deps.h: $(src)/gen_deps.sh
sudo $(src)/gen_deps.sh > $@
else
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
all:
$(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD
clean:
rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions
endif
/* binder.c
*
* Android IPC Subsystem
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
.owner = THIS_MODULE, \
.open = binder_##name##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
BINDER_DEBUG_DEAD_BINDER = 1U << 4,
BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
BINDER_DEBUG_READ_WRITE = 1U << 6,
BINDER_DEBUG_USER_REFS = 1U << 7,
BINDER_DEBUG_THREADS = 1U << 8,
BINDER_DEBUG_TRANSACTION = 1U << 9,
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
struct kernel_param *kp)
{
int ret;
ret = param_set_int(val, kp);
if (binder_stop_on_user_error < 2)
wake_up(&binder_user_error_wait);
return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
do { \
if (binder_debug_mask & mask) \
pr_info(x); \
} while (0)
#define binder_user_error(x...) \
do { \
if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
pr_info(x); \
if (binder_stop_on_user_error) \
binder_stop_on_user_error = 2; \
} while (0)
enum binder_stat_types {
BINDER_STAT_PROC,
BINDER_STAT_THREAD,
BINDER_STAT_NODE,
BINDER_STAT_REF,
BINDER_STAT_DEATH,
BINDER_STAT_TRANSACTION,
BINDER_STAT_TRANSACTION_COMPLETE,
BINDER_STAT_COUNT
};
struct binder_stats {
int br[_IOC_NR(BR_FAILED_REPLY) + 1];
int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
int obj_created[BINDER_STAT_COUNT];
int obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
binder_stats.obj_deleted[type]++;
}
static inline void binder_stats_created(enum binder_stat_types type)
{
binder_stats.obj_created[type]++;
}
struct binder_transaction_log_entry {
int debug_id;
int call_type;
int from_proc;
int from_thread;
int target_handle;
int to_proc;
int to_thread;
int to_node;
int data_size;
int offsets_size;
};
struct binder_transaction_log {
int next;
int full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
e = &log->entry[log->next];
memset(e, 0, sizeof(*e));
log->next++;
if (log->next == ARRAY_SIZE(log->entry)) {
log->next = 0;
log->full = 1;
}
return e;
}
struct binder_work {
struct list_head entry;
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
} type;
};
struct binder_node {
int debug_id;
struct binder_work work;
union {
struct rb_node rb_node;
struct hlist_node dead_node;
};
struct binder_proc *proc;
struct hlist_head refs;
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
unsigned has_strong_ref:1;
unsigned pending_strong_ref:1;
unsigned has_weak_ref:1;
unsigned pending_weak_ref:1;
unsigned has_async_transaction:1;
unsigned accept_fds:1;
unsigned min_priority:8;
struct list_head async_todo;
};
struct binder_ref_death {
struct binder_work work;
binder_uintptr_t cookie;
};
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
int debug_id;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
uint32_t desc;
int strong;
int weak;
struct binder_ref_death *death;
};
struct binder_buffer {
struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
unsigned debug_id:29;
struct binder_transaction *transaction;
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
uint8_t data[0];
};
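/*
 * Buffers are carved out of one contiguous area, so a buffer's size is
 * implicit: it extends from buffer->data to the start of the next
 * binder_buffer on the proc->buffers list (see binder_buffer_size()
 * below). Free buffers sit in an rb-tree keyed by size for best-fit
 * allocation; allocated buffers are keyed by address for lookup on free.
 */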
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
int pid;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer;
ptrdiff_t user_buffer_offset;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages;
size_t buffer_size;
uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
struct list_head delivered_death;
int max_threads;
int requested_threads;
int requested_threads_started;
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
};
enum {
BINDER_LOOPER_STATE_REGISTERED = 0x01,
BINDER_LOOPER_STATE_ENTERED = 0x02,
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
int pid;
int looper;
struct binder_transaction *transaction_stack;
struct list_head todo;
uint32_t return_error; /* Write failed, return error code in read buf */
uint32_t return_error2; /* Write failed, return error code in read */
/* buffer. Used when sending a reply to a dead process that */
/* we are also waiting on */
wait_queue_head_t wait;
struct binder_stats stats;
};
struct binder_transaction {
int debug_id;
struct binder_work work;
struct binder_thread *from;
struct binder_transaction *from_parent;
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
unsigned need_reply:1;
/* unsigned is_dead:1; */ /* not used at the moment */
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
long priority;
long saved_priority;
kuid_t sender_euid;
};
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
if (files == NULL)
return -ESRCH;
if (!lock_task_sighand(proc->tsk, &irqs))
return -EMFILE;
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
* copied from fd_install
*/
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
if (proc->files)
__fd_install(proc->files, fd, file);
}
/*
* copied from sys_close
*/
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
int retval;
if (proc->files == NULL)
return -ESRCH;
retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
return retval;
}
static inline void binder_lock(const char *tag)
{
trace_binder_lock(tag);
mutex_lock(&binder_main_lock);
trace_binder_locked(tag);
}
static inline void binder_unlock(const char *tag)
{
trace_binder_unlock(tag);
mutex_unlock(&binder_main_lock);
}
static void binder_set_nice(long nice)
{
long min_nice;
if (can_nice(current, nice)) {
set_user_nice(current, nice);
return;
}
min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: nice value %ld not allowed use %ld instead\n",
current->pid, nice, min_nice);
set_user_nice(current, min_nice);
if (min_nice <= MAX_NICE)
return;
binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static size_t binder_buffer_size(struct binder_proc *proc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &proc->buffers))
return proc->buffer + proc->buffer_size - (void *)buffer->data;
return (size_t)list_entry(buffer->entry.next,
struct binder_buffer, entry) - (size_t)buffer->data;
}
static void binder_insert_free_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
{
struct rb_node **p = &proc->free_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
size_t buffer_size;
size_t new_buffer_size;
BUG_ON(!new_buffer->free);
new_buffer_size = binder_buffer_size(proc, new_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: add free buffer, size %zd, at %p\n",
proc->pid, new_buffer_size, new_buffer);
while (*p) {
parent = *p;
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
buffer_size = binder_buffer_size(proc, buffer);
if (new_buffer_size < buffer_size)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
rb_link_node(&new_buffer->rb_node, parent, p);
rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
static void binder_insert_allocated_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
{
struct rb_node **p = &proc->allocated_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
BUG_ON(new_buffer->free);
while (*p) {
parent = *p;
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
if (new_buffer < buffer)
p = &parent->rb_left;
else if (new_buffer > buffer)
p = &parent->rb_right;
else
BUG();
}
rb_link_node(&new_buffer->rb_node, parent, p);
rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
uintptr_t user_ptr)
{
struct rb_node *n = proc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- offsetof(struct binder_buffer, data));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
if (kern_ptr < buffer)
n = n->rb_left;
else if (kern_ptr > buffer)
n = n->rb_right;
else
return buffer;
}
return NULL;
}
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct page **page;
struct mm_struct *mm;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %p-%p\n", proc->pid,
allocate ? "allocate" : "free", start, end);
if (end <= start)
return 0;
trace_binder_update_page_range(proc, allocate, start, end);
if (vma)
mm = NULL;
else
mm = get_task_mm(proc->tsk);
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
if (vma && mm != proc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
}
}
if (allocate == 0)
goto free_range;
if (vma == NULL) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
proc->pid);
goto err_no_vma;
}
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (*page == NULL) {
pr_err("%d: binder_alloc_buf failed for page at %p\n",
proc->pid, page_addr);
goto err_alloc_page_failed;
}
ret = map_kernel_range_noflush((unsigned long)page_addr,
PAGE_SIZE, PAGE_KERNEL, page);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
proc->pid, page_addr);
goto err_map_kernel_failed;
}
user_page_addr =
(uintptr_t)page_addr + proc->user_buffer_offset;
ret = vm_insert_page(vma, user_page_addr, page[0]);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
proc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return 0;
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (uintptr_t)page_addr +
proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
__free_page(*page);
*page = NULL;
err_alloc_page_failed:
;
}
err_no_vma:
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return -ENOMEM;
}
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
size_t data_size,
size_t offsets_size, int is_async)
{
struct rb_node *n = proc->free_buffers.rb_node;
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
void *has_page_addr;
void *end_page_addr;
size_t size;
if (proc->vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
proc->pid);
return NULL;
}
size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
if (size < data_size || size < offsets_size) {
binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
proc->pid, data_size, offsets_size);
return NULL;
}
if (is_async &&
proc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
proc->pid, size);
return NULL;
}
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
buffer_size = binder_buffer_size(proc, buffer);
if (size < buffer_size) {
best_fit = n;
n = n->rb_left;
} else if (size > buffer_size)
n = n->rb_right;
else {
best_fit = n;
break;
}
}
if (best_fit == NULL) {
pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
proc->pid, size);
return NULL;
}
if (n == NULL) {
buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
buffer_size = binder_buffer_size(proc, buffer);
}
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
proc->pid, size, buffer, buffer_size);
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
if (n == NULL) {
if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
buffer_size = size; /* no room for other buffers */
else
buffer_size = size + sizeof(struct binder_buffer);
}
end_page_addr =
(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
if (binder_update_page_range(proc, 1,
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
return NULL;
rb_erase(best_fit, &proc->free_buffers);
buffer->free = 0;
binder_insert_allocated_buffer(proc, buffer);
if (buffer_size != size) {
struct binder_buffer *new_buffer = (void *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(proc, new_buffer);
}
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %p\n",
proc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
if (is_async) {
proc->free_async_space -= size + sizeof(struct binder_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
proc->pid, size, proc->free_async_space);
}
return buffer;
}
static void *buffer_start_page(struct binder_buffer *buffer)
{
return (void *)((uintptr_t)buffer & PAGE_MASK);
}
static void *buffer_end_page(struct binder_buffer *buffer)
{
return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_proc *proc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
int free_page_end = 1;
int free_page_start = 1;
BUG_ON(proc->buffers.next == &buffer->entry);
prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
BUG_ON(!prev->free);
if (buffer_end_page(prev) == buffer_start_page(buffer)) {
free_page_start = 0;
if (buffer_end_page(prev) == buffer_end_page(buffer))
free_page_end = 0;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %p share page with %p\n",
proc->pid, buffer, prev);
}
if (!list_is_last(&buffer->entry, &proc->buffers)) {
next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (buffer_start_page(next) == buffer_end_page(buffer)) {
free_page_end = 0;
if (buffer_start_page(next) ==
buffer_start_page(buffer))
free_page_start = 0;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %p share page with %p\n",
proc->pid, buffer, prev);
}
}
list_del(&buffer->entry);
if (free_page_start || free_page_end) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
proc->pid, buffer, free_page_start ? "" : " end",
free_page_end ? "" : " start", prev, next);
binder_update_page_range(proc, 0, free_page_start ?
buffer_start_page(buffer) : buffer_end_page(buffer),
(free_page_end ? buffer_end_page(buffer) :
buffer_start_page(buffer)) + PAGE_SIZE, NULL);
}
}
static void binder_free_buf(struct binder_proc *proc,
struct binder_buffer *buffer)
{
size_t size, buffer_size;
buffer_size = binder_buffer_size(proc, buffer);
size = ALIGN(buffer->data_size, sizeof(void *)) +
ALIGN(buffer->offsets_size, sizeof(void *));
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_free_buf %p size %zd buffer_size %zd\n",
proc->pid, buffer, size, buffer_size);
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
BUG_ON((void *)buffer < proc->buffer);
BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
if (buffer->async_transaction) {
proc->free_async_space += size + sizeof(struct binder_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
proc->pid, size, proc->free_async_space);
}
binder_update_page_range(proc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
rb_erase(&buffer->rb_node, &proc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &proc->buffers)) {
struct binder_buffer *next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (next->free) {
rb_erase(&next->rb_node, &proc->free_buffers);
binder_delete_free_buffer(proc, next);
}
}
if (proc->buffers.next != &buffer->entry) {
struct binder_buffer *prev = list_entry(buffer->entry.prev,
struct binder_buffer, entry);
if (prev->free) {
binder_delete_free_buffer(proc, buffer);
rb_erase(&prev->rb_node, &proc->free_buffers);
buffer = prev;
}
}
binder_insert_free_buffer(proc, buffer);
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
if (ptr < node->ptr)
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
else
return node;
}
return NULL;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_uintptr_t ptr,
binder_uintptr_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
while (*p) {
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
if (ptr < node->ptr)
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
else
return NULL;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node == NULL)
return NULL;
binder_stats_created(BINDER_STAT_NODE);
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = ++binder_last_id;
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
return node;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
struct list_head *target_list)
{
if (strong) {
if (internal) {
if (target_list == NULL &&
node->internal_strong_refs == 0 &&
!(node == binder_context_mgr_node &&
node->has_strong_ref)) {
pr_err("invalid inc strong node for %d\n",
node->debug_id);
return -EINVAL;
}
node->internal_strong_refs++;
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
list_del_init(&node->work.entry);
list_add_tail(&node->work.entry, target_list);
}
} else {
if (!internal)
node->local_weak_refs++;
if (!node->has_weak_ref && list_empty(&node->work.entry)) {
if (target_list == NULL) {
pr_err("invalid inc weak node for %d\n",
node->debug_id);
return -EINVAL;
}
list_add_tail(&node->work.entry, target_list);
}
}
return 0;
}
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
return 0;
} else {
if (!internal)
node->local_weak_refs--;
if (node->local_weak_refs || !hlist_empty(&node->refs))
return 0;
}
if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
list_add_tail(&node->work.entry, &node->proc->todo);
wake_up_interruptible(&node->proc->wait);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
!node->local_weak_refs) {
list_del_init(&node->work.entry);
if (node->proc) {
rb_erase(&node->rb_node, &node->proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
hlist_del(&node->dead_node);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
}
}
return 0;
}
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
uint32_t desc)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (desc < ref->desc)
n = n->rb_left;
else if (desc > ref->desc)
n = n->rb_right;
else
return ref;
}
return NULL;
}
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct binder_node *node)
{
struct rb_node *n;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
if (node < ref->node)
p = &(*p)->rb_left;
else if (node > ref->node)
p = &(*p)->rb_right;
else
return ref;
}
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
new_ref->debug_id = ++binder_last_id;
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
break;
new_ref->desc = ref->desc + 1;
}
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
if (new_ref->desc < ref->desc)
p = &(*p)->rb_left;
else if (new_ref->desc > ref->desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
if (node) {
hlist_add_head(&new_ref->node_entry, &node->refs);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d new ref %d desc %d for node %d\n",
proc->pid, new_ref->debug_id, new_ref->desc,
node->debug_id);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d new ref %d desc %d for dead node\n",
proc->pid, new_ref->debug_id, new_ref->desc);
}
return new_ref;
}
static void binder_delete_ref(struct binder_ref *ref)
{
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
ref->proc->pid, ref->debug_id, ref->desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
if (ref->strong)
binder_dec_node(ref->node, 1, 1);
hlist_del(&ref->node_entry);
binder_dec_node(ref->node, 0, 1);
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
ref->proc->pid, ref->debug_id, ref->desc);
list_del(&ref->death->work.entry);
kfree(ref->death);
binder_stats_deleted(BINDER_STAT_DEATH);
}
kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
static int binder_inc_ref(struct binder_ref *ref, int strong,
struct list_head *target_list)
{
int ret;
if (strong) {
if (ref->strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
ref->strong++;
} else {
if (ref->weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
ref->weak++;
}
return 0;
}
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
if (strong) {
if (ref->strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
ref->proc->pid, ref->debug_id,
ref->desc, ref->strong, ref->weak);
return -EINVAL;
}
ref->strong--;
if (ref->strong == 0) {
int ret;
ret = binder_dec_node(ref->node, strong, 1);
if (ret)
return ret;
}
} else {
if (ref->weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
ref->proc->pid, ref->debug_id,
ref->desc, ref->strong, ref->weak);
return -EINVAL;
}
ref->weak--;
}
if (ref->strong == 0 && ref->weak == 0)
binder_delete_ref(ref);
return 0;
}
static void binder_pop_transaction(struct binder_thread *target_thread,
struct binder_transaction *t)
{
if (target_thread) {
BUG_ON(target_thread->transaction_stack != t);
BUG_ON(target_thread->transaction_stack->from != target_thread);
target_thread->transaction_stack =
target_thread->transaction_stack->from_parent;
t->from = NULL;
}
t->need_reply = 0;
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
uint32_t error_code)
{
struct binder_thread *target_thread;
struct binder_transaction *next;
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
target_thread = t->from;
if (target_thread) {
if (target_thread->return_error != BR_OK &&
target_thread->return_error2 == BR_OK) {
target_thread->return_error2 =
target_thread->return_error;
target_thread->return_error = BR_OK;
}
if (target_thread->return_error == BR_OK) {
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"send failed reply for transaction %d to %d:%d\n",
t->debug_id,
target_thread->proc->pid,
target_thread->pid);
binder_pop_transaction(target_thread, t);
target_thread->return_error = error_code;
wake_up_interruptible(&target_thread->wait);
} else {
pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
target_thread->proc->pid,
target_thread->pid,
target_thread->return_error);
}
return;
}
next = t->from_parent;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"send failed reply for transaction %d, target dead\n",
t->debug_id);
binder_pop_transaction(target_thread, t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
return;
}
t = next;
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread -- retry %d\n",
t->debug_id);
}
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
{
binder_size_t *offp, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %p\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
offp = (binder_size_t *)(buffer->data +
ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
off_end = (void *)offp + buffer->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
pr_err("transaction release %d bad offset %lld, size %zd\n",
debug_id, (u64)*offp, buffer->data_size);
continue;
}
fp = (struct flat_binder_object *)(buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx\n",
node->debug_id, (u64)node->ptr);
binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, ref->node->debug_id);
binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
} break;
case BINDER_TYPE_FD:
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d\n", fp->handle);
if (failed_at)
task_close_fd(proc, fp->handle);
break;
default:
pr_err("transaction release %d bad object type %x\n",
debug_id, fp->type);
break;
}
}
}
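/**
 * binder_transaction() - handle BC_TRANSACTION/BC_REPLY from userspace
 * @proc:   sending process
 * @thread: sending thread
 * @tr:     userspace transaction descriptor
 * @reply:  nonzero for BC_REPLY, zero for BC_TRANSACTION
 *
 * Resolves the target node/thread, allocates a buffer in the target
 * process, copies the payload in and translates every embedded binder
 * object (nodes become handles, handles become nodes or new handles,
 * fds are installed into the target), then queues the work on the
 * target and a BINDER_WORK_TRANSACTION_COMPLETE on the sender.  On
 * failure the partially translated buffer is unwound and the error is
 * reported via thread->return_error or a failed reply.
 */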
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
} else {
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else {
target_node = binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
e->to_node = target_node->debug_id;
target_proc = target_node->proc;
if (target_proc == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
e->debug_id = t->debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
proc->pid, thread->pid, (u64)*offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
if (node == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_new_node_failed;
}
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
(u64)fp->binder, node->debug_id,
(u64)fp->cookie, (u64)node->cookie);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (security_binder_transfer_binder(proc->tsk,
target_proc->tsk)) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (security_binder_transfer_binder(proc->tsk,
target_proc->tsk)) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
(u64)ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
case BINDER_TYPE_FD: {
int target_fd;
struct file *file;
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
file = fget(fp->handle);
if (file == NULL) {
binder_user_error("%d:%d got transaction with invalid fd, %d\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
if (security_binder_transfer_file(proc->tsk,
target_proc->tsk,
file) < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
task_fd_install(target_proc, target_fd, file);
trace_binder_transaction_fd(t, fp->handle, target_fd);
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
}
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
t->buffer->transaction = NULL;
binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d, size %lld-%lld\n",
proc->pid, thread->pid, return_error,
(u64)tr->data_size, (u64)tr->offsets_size);
{
struct binder_transaction_log_entry *fe;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
}
BUG_ON(thread->return_error != BR_OK);
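	/* The thread that sent the failed reply still gets a normal
	 * BR_TRANSACTION_COMPLETE; binder_send_failed_reply() routes the
	 * error back to the original caller.
	 */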
if (in_reply_to) {
thread->return_error = BR_TRANSACTION_COMPLETE;
binder_send_failed_reply(in_reply_to, return_error);
} else
thread->return_error = return_error;
}
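/**
 * binder_thread_write() - consume BC_* commands from the write buffer
 * @proc:          calling process
 * @thread:        calling thread
 * @binder_buffer: userspace address of the command stream
 * @size:          size of the stream in bytes
 * @consumed:      in/out count of bytes already processed
 *
 * Processes commands until the buffer is exhausted or a pending
 * return_error stops the loop; *@consumed is updated after each command
 * so a restarted call resumes where it left off.
 */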
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
case BC_INCREFS:
case BC_ACQUIRE:
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
struct binder_ref *ref;
const char *debug_string;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (target == 0 && binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
ref = binder_get_ref_for_node(proc,
binder_context_mgr_node);
				/*
				 * binder_get_ref_for_node() returns NULL on
				 * allocation failure; that case falls through
				 * to the invalid-ref check below.
				 */
				if (ref && ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
ref->desc);
}
} else
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
break;
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
binder_dec_ref(ref, 0);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid, debug_string, ref->debug_id,
ref->desc, ref->strong, ref->weak, ref->node->debug_id);
break;
}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
binder_user_error("%d:%d %s u%016llx no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
"BC_ACQUIRE_DONE",
(u64)node_ptr);
break;
}
if (cookie != node->cookie) {
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
break;
}
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
break;
}
node->pending_strong_ref = 0;
} else {
if (node->pending_weak_ref == 0) {
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
break;
}
node->pending_weak_ref = 0;
}
binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s node %d ls %d lw %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
node->debug_id, node->local_strong_refs, node->local_weak_refs);
break;
}
case BC_ATTEMPT_ACQUIRE:
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
return -EINVAL;
case BC_ACQUIRE_RESULT:
pr_err("BC_ACQUIRE_RESULT not supported\n");
return -EINVAL;
case BC_FREE_BUFFER: {
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
break;
}
if (!buffer->allow_user_free) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
proc->pid, thread->pid, (u64)data_ptr);
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
"%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
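			/* Async transactions are serialized per node:
			 * freeing this buffer either releases the next
			 * queued async item or marks the node idle. */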
if (buffer->async_transaction && buffer->target_node) {
BUG_ON(!buffer->target_node->has_async_transaction);
if (list_empty(&buffer->target_node->async_todo))
buffer->target_node->has_async_transaction = 0;
else
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
binder_free_buf(proc, buffer);
break;
}
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
case BC_REGISTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
} else if (proc->requested_threads == 0) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
proc->pid, thread->pid);
} else {
proc->requested_threads--;
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
case BC_EXIT_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_EXIT_LOOPER\n",
proc->pid, thread->pid);
thread->looper |= BINDER_LOOPER_STATE_EXITED;
break;
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
break;
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
(u64)cookie, ref->debug_id, ref->desc,
ref->strong, ref->weak, ref->node->debug_id);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
break;
}
death = kzalloc(sizeof(*death), GFP_KERNEL);
if (death == NULL) {
thread->return_error = BR_ERROR;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
proc->pid, thread->pid);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
INIT_LIST_HEAD(&death->work.entry);
death->cookie = cookie;
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&ref->death->work.entry, &thread->todo);
} else {
list_add_tail(&ref->death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
}
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
break;
}
death = ref->death;
if (death->cookie != cookie) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
break;
}
ref->death = NULL;
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&death->work.entry, &thread->todo);
} else {
list_add_tail(&death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
}
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
break;
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
break;
}
list_del_init(&death->work.entry);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&death->work.entry, &thread->todo);
} else {
list_add_tail(&death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
}
}
} break;
default:
pr_err("%d:%d unknown command %d\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
*consumed = ptr - buffer;
}
return 0;
}
static void binder_stat_br(struct binder_proc *proc,
struct binder_thread *thread, uint32_t cmd)
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
binder_stats.br[_IOC_NR(cmd)]++;
proc->stats.br[_IOC_NR(cmd)]++;
thread->stats.br[_IOC_NR(cmd)]++;
}
}
static int binder_has_proc_work(struct binder_proc *proc,
struct binder_thread *thread)
{
return !list_empty(&proc->todo) ||
(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
static int binder_has_thread_work(struct binder_thread *thread)
{
return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
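/**
 * binder_thread_read() - fill the read buffer with BR_* work
 * @proc:          calling process
 * @thread:        calling thread
 * @binder_buffer: userspace address of the return buffer
 * @size:          size of the buffer in bytes
 * @consumed:      in/out count of bytes already produced
 * @non_block:     if set, return -EAGAIN instead of sleeping
 *
 * Waits (unless @non_block) for thread- or process-level work, dropping
 * the global lock while sleeping, and serializes work items as BR_*
 * commands.  Before returning it may append BR_SPAWN_LOOPER to ask
 * userspace for another looper thread.
 */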
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
if (thread->return_error != BR_OK && ptr < end) {
if (thread->return_error2 != BR_OK) {
if (put_user(thread->return_error2, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error2);
if (ptr == end)
goto done;
thread->return_error2 = BR_OK;
}
if (put_user(thread->return_error, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error);
thread->return_error = BR_OK;
goto done;
}
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
proc->ready_threads++;
binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
!list_empty(&thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
proc->pid, thread->pid, thread->looper);
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
if (non_block) {
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else
ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
binder_lock(__func__);
if (wait_for_proc_work)
proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
			/* No data added beyond the initial BR_NOOP
			 * (ptr - buffer == 4 bytes).
			 */
			if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
uint32_t cmd = BR_NOOP;
const char *cmd_name;
int strong = node->internal_strong_refs || node->local_strong_refs;
int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
if (weak && !node->has_weak_ref) {
cmd = BR_INCREFS;
cmd_name = "BR_INCREFS";
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
} else if (strong && !node->has_strong_ref) {
cmd = BR_ACQUIRE;
cmd_name = "BR_ACQUIRE";
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
} else if (!strong && node->has_strong_ref) {
cmd = BR_RELEASE;
cmd_name = "BR_RELEASE";
node->has_strong_ref = 0;
} else if (!weak && node->has_weak_ref) {
cmd = BR_DECREFS;
cmd_name = "BR_DECREFS";
node->has_weak_ref = 0;
}
if (cmd != BR_NOOP) {
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(node->ptr,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
if (put_user(node->cookie,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s %d u%016llx c%016llx\n",
proc->pid, thread->pid, cmd_name,
node->debug_id,
(u64)node->ptr, (u64)node->cookie);
} else {
list_del_init(&w->entry);
if (!weak && !strong) {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx deleted\n",
proc->pid, thread->pid,
node->debug_id,
(u64)node->ptr,
(u64)node->cookie);
rb_erase(&node->rb_node, &proc->nodes);
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx state unchanged\n",
proc->pid, thread->pid,
node->debug_id,
(u64)node->ptr,
(u64)node->cookie);
}
}
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(death->cookie,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
(u64)death->cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
list_del(&w->entry);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} else
list_move(&w->entry, &proc->delivered_death);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
}
if (!t)
continue;
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
t->debug_id, t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
done:
*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
proc->requested_threads++;
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
}
return 0;
}
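/**
 * binder_release_work() - discard a queue of undelivered work
 * @list: work list to drain
 *
 * Two-way transactions still expecting a reply get a BR_DEAD_REPLY sent
 * back to their sender; everything else is simply freed.
 */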
static void binder_release_work(struct list_head *list)
{
struct binder_work *w;
while (!list_empty(list)) {
w = list_first_entry(list, struct binder_work, entry);
list_del_init(&w->entry);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
if (t->buffer->target_node &&
!(t->flags & TF_ONE_WAY)) {
binder_send_failed_reply(t, BR_DEAD_REPLY);
} else {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
death = container_of(w, struct binder_ref_death, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered death notification, %016llx\n",
(u64)death->cookie);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
default:
pr_err("unexpected work type, %d, not freed\n",
w->type);
break;
}
}
}
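/**
 * binder_get_thread() - find or create the binder_thread for current
 * @proc: process the thread belongs to
 *
 * Looks up current->pid in @proc's thread rbtree and inserts a freshly
 * initialized binder_thread on first use.  Returns NULL on allocation
 * failure.
 */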
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
struct rb_node **p = &proc->threads.rb_node;
while (*p) {
parent = *p;
thread = rb_entry(parent, struct binder_thread, rb_node);
if (current->pid < thread->pid)
p = &(*p)->rb_left;
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
break;
}
if (*p == NULL) {
thread = kzalloc(sizeof(*thread), GFP_KERNEL);
if (thread == NULL)
return NULL;
binder_stats_created(BINDER_STAT_THREAD);
thread->proc = proc;
thread->pid = current->pid;
init_waitqueue_head(&thread->wait);
INIT_LIST_HEAD(&thread->todo);
rb_link_node(&thread->rb_node, parent, p);
rb_insert_color(&thread->rb_node, &proc->threads);
thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
thread->return_error = BR_OK;
thread->return_error2 = BR_OK;
}
return thread;
}
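/**
 * binder_free_thread() - tear down an exiting binder thread
 * @proc:   owning process
 * @thread: thread to release
 *
 * Unwinds the thread's transaction stack, detaching it as sender or
 * target from each entry, sends a failed reply if it died mid-call, and
 * releases its pending work.  Returns the number of transactions that
 * were still active.
 */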
static int binder_free_thread(struct binder_proc *proc,
struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
if (t && t->to_thread == thread)
send_reply = t;
while (t) {
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
proc->pid, thread->pid,
t->debug_id,
(t->to_thread == thread) ? "in" : "out");
if (t->to_thread == thread) {
t->to_proc = NULL;
t->to_thread = NULL;
if (t->buffer) {
t->buffer->transaction = NULL;
t->buffer = NULL;
}
t = t->to_parent;
} else if (t->from == thread) {
t->from = NULL;
t = t->from_parent;
} else
BUG();
}
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(&thread->todo);
kfree(thread);
binder_stats_deleted(BINDER_STAT_THREAD);
return active_transactions;
}
static unsigned int binder_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
int wait_for_proc_work;
binder_lock(__func__);
thread = binder_get_thread(proc);
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo) && thread->return_error == BR_OK;
binder_unlock(__func__);
if (wait_for_proc_work) {
if (binder_has_proc_work(proc, thread))
return POLLIN;
poll_wait(filp, &proc->wait, wait);
if (binder_has_proc_work(proc, thread))
return POLLIN;
} else {
if (binder_has_thread_work(thread))
return POLLIN;
poll_wait(filp, &thread->wait, wait);
if (binder_has_thread_work(thread))
return POLLIN;
}
return 0;
}
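/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:   binder file
 * @cmd:    ioctl command, used to validate the argument size
 * @arg:    userspace pointer to struct binder_write_read
 * @thread: calling thread
 *
 * Copies in the binder_write_read descriptor, runs the write half then
 * the read half, and copies the consumed counts back even when one half
 * fails so userspace can tell how far processing got.
 */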
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto out;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
(u64)bwr.write_size, (u64)bwr.write_buffer,
(u64)bwr.read_size, (u64)bwr.read_buffer);
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
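/**
 * binder_ioctl_set_ctx_mgr() - install the caller as context manager
 * @filp: binder file of the calling process
 *
 * The context manager owns handle 0.  Fails with -EBUSY if one is
 * already registered, and with -EPERM if a different euid claimed the
 * role earlier.
 */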
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
kuid_t curr_euid = current_euid();
if (binder_context_mgr_node != NULL) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
}
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
if (uid_valid(binder_context_mgr_uid)) {
if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
binder_context_mgr_uid = curr_euid;
}
binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto out;
}
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
out:
return ret;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS:
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
goto err;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
binder_unlock(__func__);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
struct binder_proc *proc = vma->vm_private_data;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
}
static void binder_vma_close(struct vm_area_struct *vma)
{
struct binder_proc *proc = vma->vm_private_data;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
proc->vma = NULL;
proc->vma_vm_mm = NULL;
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
.close = binder_vma_close,
.fault = binder_vm_fault,
};
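/**
 * binder_mmap() - set up the per-process transaction buffer
 * @filp: binder file
 * @vma:  userspace mapping, clamped to at most 4MB
 *
 * Reserves a kernel vm area of matching size, allocates the page-pointer
 * array, maps the first page and seeds the free-buffer tree.  The
 * mapping is made non-writable for userspace (VM_MAYWRITE cleared); the
 * kernel fills buffers through its own alias of the pages.
 */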
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
struct binder_buffer *buffer;
if (proc->tsk != current)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
ret = -EPERM;
failure_string = "bad vm_flags";
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
if (proc->buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
proc->buffer = area->addr;
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
mutex_unlock(&binder_mmap_lock);
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
vma->vm_start += PAGE_SIZE;
}
}
#endif
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
if (proc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
proc->buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
buffer = proc->buffer;
INIT_LIST_HEAD(&proc->buffers);
list_add(&buffer->entry, &proc->buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
proc->free_async_space = proc->buffer_size / 2;
barrier();
proc->files = get_files_struct(current);
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;
/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
err_alloc_small_buf_failed:
kfree(proc->pages);
proc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_mmap_lock);
vfree(proc->buffer);
proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
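/**
 * binder_open() - create binder state for a process opening the device
 * @nodp: unused inode
 * @filp: file being opened
 *
 * Allocates a binder_proc for the caller, stashes it in
 * filp->private_data, links it into the global binder_procs list and
 * creates its debugfs entry.
 */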
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
binder_unlock(__func__);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
}
return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
struct binder_proc *proc = filp->private_data;
binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
struct rb_node *n;
int wake_count = 0;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
wake_up_interruptible_all(&proc->wait);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc = filp->private_data;
debugfs_remove(proc->debugfs_entry);
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
return 0;
}
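/**
 * binder_node_release() - release a node of a dying process
 * @node: node being released
 * @refs: running count of incoming references, updated and returned
 *
 * A node with no remaining references is freed outright.  Otherwise it
 * becomes a dead node and a BINDER_WORK_DEAD_BINDER item is queued to
 * every process that requested a death notification on it.
 */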
static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
int death = 0;
list_del_init(&node->work.entry);
binder_release_work(&node->async_todo);
if (hlist_empty(&node->refs)) {
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
return refs;
}
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
hlist_add_head(&node->dead_node, &binder_dead_nodes);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
if (!ref->death)
continue;
death++;
if (list_empty(&ref->death->work.entry)) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
list_add_tail(&ref->death->work.entry,
&ref->proc->todo);
wake_up_interruptible(&ref->proc->wait);
} else
BUG();
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
return refs;
}
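/**
 * binder_deferred_release() - free everything a dead process left behind
 * @proc: process being released
 *
 * Tears down threads (failing their in-flight transactions), nodes,
 * refs, queued work, outstanding buffers and the mapped pages, then
 * frees @proc itself.
 */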
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_transaction *t;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers,
active_transactions, page_count;
BUG_ON(proc->vma);
BUG_ON(proc->files);
hlist_del(&proc->proc_node);
if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%s: %d context_mgr_node gone\n",
__func__, proc->pid);
binder_context_mgr_node = NULL;
}
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
threads++;
active_transactions += binder_free_thread(proc, thread);
}
nodes = 0;
incoming_refs = 0;
while ((n = rb_first(&proc->nodes))) {
struct binder_node *node;
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
rb_erase(&node->rb_node, &proc->nodes);
incoming_refs = binder_node_release(node, incoming_refs);
}
outgoing_refs = 0;
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
binder_delete_ref(ref);
}
binder_release_work(&proc->todo);
binder_release_work(&proc->delivered_death);
buffers = 0;
while ((n = rb_first(&proc->allocated_buffers))) {
struct binder_buffer *buffer;
buffer = rb_entry(n, struct binder_buffer, rb_node);
t = buffer->transaction;
if (t) {
t->buffer = NULL;
buffer->transaction = NULL;
pr_err("release proc %d, transaction %d, not freed\n",
proc->pid, t->debug_id);
/*BUG();*/
}
binder_free_buf(proc, buffer);
buffers++;
}
binder_stats_deleted(BINDER_STAT_PROC);
page_count = 0;
if (proc->pages) {
int i;
for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
if (!proc->pages[i])
continue;
page_addr = proc->buffer + i * PAGE_SIZE;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d at %p not freed\n",
__func__, proc->pid, i, page_addr);
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
__free_page(proc->pages[i]);
page_count++;
}
kfree(proc->pages);
vfree(proc->buffer);
}
put_task_struct(proc->tsk);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
outgoing_refs, active_transactions, buffers, page_count);
kfree(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
struct files_struct *files;
int defer;
do {
binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
hlist_del_init(&proc->deferred_work_node);
defer = proc->deferred_work;
proc->deferred_work = 0;
} else {
proc = NULL;
defer = 0;
}
mutex_unlock(&binder_deferred_lock);
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
files = proc->files;
if (files)
proc->files = NULL;
}
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
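/**
 * binder_defer_work() - schedule deferred work for a process
 * @proc:  process the work applies to
 * @defer: BINDER_DEFERRED_* bits to set
 *
 * Accumulates @defer into proc->deferred_work and queues the shared
 * worker unless the process is already on the deferred list.
 */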
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
mutex_lock(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
queue_work(binder_deferred_workqueue, &binder_deferred_work);
}
mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction(struct seq_file *m, const char *prefix,
struct binder_transaction *t)
{
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
t->to_proc ? t->to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority, t->need_reply);
if (t->buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
if (t->buffer->target_node)
seq_printf(m, " node %d",
t->buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
t->buffer->data_size, t->buffer->offsets_size,
t->buffer->data);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
struct binder_buffer *buffer)
{
seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
buffer->transaction ? "active" : "delivered");
}
static void print_binder_work(struct seq_file *m, const char *prefix,
const char *transaction_prefix,
struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
print_binder_transaction(m, transaction_prefix, t);
break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
prefix, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
break;
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
seq_printf(m, "%shas cleared dead binder\n", prefix);
break;
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
}
}
static void print_binder_thread(struct seq_file *m,
struct binder_thread *thread,
int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
print_binder_transaction(m,
" outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
print_binder_transaction(m,
" incoming transaction", t);
t = t->to_parent;
} else {
print_binder_transaction(m, " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work(m, " ", " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
int count;
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work(m, " ",
" pending async transaction", w);
}
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
static void print_binder_proc(struct seq_file *m,
struct binder_proc *proc, int print_all)
{
struct binder_work *w;
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
seq_printf(m, "proc %d\n", proc->pid);
header_pos = m->count;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
print_binder_thread(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (print_all || node->has_async_transaction)
print_binder_node(m, node);
}
if (print_all) {
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
print_binder_ref(m, rb_entry(n, struct binder_ref,
rb_node_desc));
}
for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
print_binder_buffer(m, " buffer",
rb_entry(n, struct binder_buffer, rb_node));
list_for_each_entry(w, &proc->todo, entry)
print_binder_work(m, " ", " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
static const char * const binder_return_strings[] = {
"BR_ERROR",
"BR_OK",
"BR_TRANSACTION",
"BR_REPLY",
"BR_ACQUIRE_RESULT",
"BR_DEAD_REPLY",
"BR_TRANSACTION_COMPLETE",
"BR_INCREFS",
"BR_ACQUIRE",
"BR_RELEASE",
"BR_DECREFS",
"BR_ATTEMPT_ACQUIRE",
"BR_NOOP",
"BR_SPAWN_LOOPER",
"BR_FINISHED",
"BR_DEAD_BINDER",
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
"BR_FAILED_REPLY"
};
static const char * const binder_command_strings[] = {
"BC_TRANSACTION",
"BC_REPLY",
"BC_ACQUIRE_RESULT",
"BC_FREE_BUFFER",
"BC_INCREFS",
"BC_ACQUIRE",
"BC_RELEASE",
"BC_DECREFS",
"BC_INCREFS_DONE",
"BC_ACQUIRE_DONE",
"BC_ATTEMPT_ACQUIRE",
"BC_REGISTER_LOOPER",
"BC_ENTER_LOOPER",
"BC_EXIT_LOOPER",
"BC_REQUEST_DEATH_NOTIFICATION",
"BC_CLEAR_DEATH_NOTIFICATION",
"BC_DEAD_BINDER_DONE"
};
static const char * const binder_objstat_strings[] = {
"proc",
"thread",
"node",
"ref",
"death",
"transaction",
"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
struct binder_stats *stats)
{
int i;
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
if (stats->bc[i])
seq_printf(m, "%s%s: %d\n", prefix,
binder_command_strings[i], stats->bc[i]);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
if (stats->br[i])
seq_printf(m, "%s%s: %d\n", prefix,
binder_return_strings[i], stats->br[i]);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(binder_objstat_strings));
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
if (stats->obj_created[i] || stats->obj_deleted[i])
seq_printf(m, "%s%s: active %d total %d\n", prefix,
binder_objstat_strings[i],
stats->obj_created[i] - stats->obj_deleted[i],
stats->obj_created[i]);
}
}
static void print_binder_proc_stats(struct seq_file *m,
struct binder_proc *proc)
{
struct binder_work *w;
struct rb_node *n;
int count, strong, weak;
seq_printf(m, "proc %d\n", proc->pid);
count = 0;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
proc->ready_threads, proc->free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
strong += ref->strong;
weak += ref->weak;
}
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
count = 0;
for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
seq_printf(m, " buffers: %d\n", count);
count = 0;
list_for_each_entry(w, &proc->todo, entry) {
switch (w->type) {
case BINDER_WORK_TRANSACTION:
count++;
break;
default:
break;
}
}
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder state:\n");
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
print_binder_node(m, node);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(__func__);
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
if (do_lock)
binder_unlock(__func__);
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
if (do_lock)
binder_unlock(__func__);
return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc = m->private;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(__func__);
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
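	/* call_type encodes: 2 = reply, 1 = async (one-way), otherwise call */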
seq_printf(m,
"%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->to_node,
e->target_handle, e->data_size, e->offsets_size);
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
int i;
if (log->full) {
for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
print_binder_transaction_log_entry(m, &log->entry[i]);
}
for (i = 0; i < log->next; i++)
print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
static struct miscdevice binder_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "binder",
.fops = &binder_fops
};
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
static int __init binder_init(void)
{
int ret;
binder_deferred_workqueue = create_singlethread_workqueue("binder");
if (!binder_deferred_workqueue)
return -ENOMEM;
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
ret = misc_register(&binder_miscdev);
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
S_IRUGO,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
debugfs_create_file("stats",
S_IRUGO,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
debugfs_create_file("transactions",
S_IRUGO,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
debugfs_create_file("transaction_log",
S_IRUGO,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
S_IRUGO,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
return ret;
}
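/*
 * With debugfs mounted, the entries created above appear as
 * /sys/kernel/debug/binder/{state,stats,transactions,transaction_log,
 * failed_transaction_log}, alongside a "proc" subdirectory holding
 * per-process state files.
 */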
device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
MODULE_LICENSE("GPL v2");
/*
* Copyright (C) 2008 Google, Inc.
*
* Based on, but no longer compatible with, the original
* OpenBinder.org binder driver interface, which is:
*
* Copyright (c) 2005 Palmsource, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define B_PACK_CHARS(c1, c2, c3, c4) \
((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85
enum {
BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};
enum {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
};
#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
* contains offsets into the data where these structures occur. The Binder
* driver takes care of re-writing the structure type and data as it moves
* between processes.
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
__u32 type;
__u32 flags;
/* 8 bytes of data. */
union {
binder_uintptr_t binder; /* local object */
__u32 handle; /* remote object */
};
/* extra data associated with local object */
binder_uintptr_t cookie;
};
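/*
 * Illustrative user-space sketch (not part of the driver): sending one
 * local binder object. The object is placed at offset 0 of the data
 * buffer, and the offsets array tells the driver where to find it.
 * 'weak_ptr' and 'cookie_ptr' are hypothetical names.
 *
 *	struct flat_binder_object obj = {
 *		.type   = BINDER_TYPE_BINDER,
 *		.flags  = FLAT_BINDER_FLAG_ACCEPTS_FDS,
 *		.binder = (binder_uintptr_t)weak_ptr,
 *		.cookie = (binder_uintptr_t)cookie_ptr,
 *	};
 *	binder_size_t offsets[] = { 0 };	-- obj begins at data byte 0
 *	-- point binder_transaction_data.data.ptr.buffer at &obj and
 *	-- .data.ptr.offsets at offsets before issuing BC_TRANSACTION
 */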
/*
 * On 64-bit platforms where user code may run in 32-bit mode, the driver
* translate the buffer (and local binder) addresses appropriately.
*/
struct binder_write_read {
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};
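/*
 * Illustrative user-space sketch (not part of the driver): one
 * BINDER_WRITE_READ round trip. 'fd' is a hypothetical descriptor from
 * open("/dev/binder", O_RDWR).
 *
 *	struct binder_write_read bwr;
 *	uint32_t wbuf[] = { BC_ENTER_LOOPER };
 *	char rbuf[256];
 *
 *	memset(&bwr, 0, sizeof(bwr));
 *	bwr.write_buffer = (binder_uintptr_t)wbuf;
 *	bwr.write_size   = sizeof(wbuf);
 *	bwr.read_buffer  = (binder_uintptr_t)rbuf;
 *	bwr.read_size    = sizeof(rbuf);
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		-- handle the error; see the EINTR note below
 *	-- bwr.read_consumed bytes of BR_* commands are now in rbuf
 */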
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
__s32 protocol_version;
};
/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
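/*
 * Illustrative user-space sketch (not part of the driver): checking that
 * the driver speaks the protocol this header was compiled against.
 * 'fd' is a hypothetical open descriptor for /dev/binder.
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		-- refuse to use this device
 */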
/*
 * NOTE: Two special error codes you should check for when calling
 * into the driver are:
 *
 * EINTR -- The operation has been interrupted. This should be
* handled by retrying the ioctl() until a different error code
* is returned.
*
* ECONNREFUSED -- The driver is no longer accepting operations
* from your process. That is, the process is being destroyed.
* You should handle this by exiting from your process. Note
* that once this error code is returned, all further calls to
* the driver from any thread will return this same code.
*/
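/*
 * Illustrative user-space sketch (not part of the driver) of the retry
 * rule described above:
 *
 *	int ret;
 *
 *	do {
 *		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	} while (ret < 0 && errno == EINTR);
 *	if (ret < 0 && errno == ECONNREFUSED)
 *		exit(0);	-- the driver is shutting this process down
 */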
enum transaction_flags {
TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
};
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
/* target descriptor of command transaction */
__u32 handle;
/* target descriptor of return transaction */
binder_uintptr_t ptr;
} target;
binder_uintptr_t cookie; /* target object cookie */
__u32 code; /* transaction command */
/* General information about the transaction. */
__u32 flags;
pid_t sender_pid;
uid_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
binder_uintptr_t buffer;
/* offsets from buffer to flat_binder_object structs */
binder_uintptr_t offsets;
} ptr;
__u8 buf[8];
} data;
};
struct binder_ptr_cookie {
binder_uintptr_t ptr;
binder_uintptr_t cookie;
};
struct binder_handle_cookie {
__u32 handle;
binder_uintptr_t cookie;
} __packed;
struct binder_pri_desc {
__s32 priority;
__u32 desc;
};
struct binder_pri_ptr_cookie {
__s32 priority;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
};
enum binder_driver_return_protocol {
BR_ERROR = _IOR('r', 0, __s32),
/*
* int: error code
*/
BR_OK = _IO('r', 1),
/* No parameters! */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
* binder_transaction_data: the received command.
*/
BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
* Else the remote object has acquired a primary reference.
*/
BR_DEAD_REPLY = _IO('r', 5),
/*
* The target of the last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
*/
BR_TRANSACTION_COMPLETE = _IO('r', 6),
/*
* No parameters... always refers to the last transaction requested
* (including replies). Note that this will be sent even for
* asynchronous transactions.
*/
BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
/*
* not currently supported
* int: priority
* void *: ptr to binder
* void *: cookie for binder
*/
BR_NOOP = _IO('r', 12),
/*
* No parameters. Do nothing and examine the next command. It exists
* primarily so that we can replace it with a BR_SPAWN_LOOPER command.
*/
BR_SPAWN_LOOPER = _IO('r', 13),
/*
* No parameters. The driver has determined that a process has no
* threads waiting to service incoming transactions. When a process
* receives this command, it must spawn a new service thread and
* register it via bcENTER_LOOPER.
*/
BR_FINISHED = _IO('r', 14),
/*
* not currently supported
* stop threadpool thread
*/
BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
/*
* void *: cookie
*/
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
/*
* void *: cookie
*/
BR_FAILED_REPLY = _IO('r', 17),
/*
	 * The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
};
enum binder_driver_command_protocol {
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
/*
* binder_transaction_data: the sent command.
*/
BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
/*
* void *: ptr to transaction data received on a read
*/
BC_INCREFS = _IOW('c', 4, __u32),
BC_ACQUIRE = _IOW('c', 5, __u32),
BC_RELEASE = _IOW('c', 6, __u32),
BC_DECREFS = _IOW('c', 7, __u32),
/*
* int: descriptor
*/
BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
/*
* not currently supported
* int: priority
* int: descriptor
*/
BC_REGISTER_LOOPER = _IO('c', 11),
/*
* No parameters.
* Register a spawned looper thread with the device.
*/
BC_ENTER_LOOPER = _IO('c', 12),
BC_EXIT_LOOPER = _IO('c', 13),
/*
* No parameters.
* These two commands are sent as an application-level thread
* enters and exits the binder loop, respectively. They are
* used so the binder can have an accurate count of the number
* of looping threads it has available.
*/
BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
struct binder_handle_cookie),
/*
* int: handle
* void *: cookie
*/
BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
struct binder_handle_cookie),
/*
* int: handle
* void *: cookie
*/
BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
/*
* void *: cookie
*/
};
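/*
 * Illustrative user-space sketch (not part of the driver): a looper
 * thread following the BC_ENTER_LOOPER protocol above. 'fd' and the
 * dispatch step are hypothetical.
 *
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr;
 *	char rbuf[256];
 *
 *	memset(&bwr, 0, sizeof(bwr));
 *	bwr.write_buffer = (binder_uintptr_t)&enter;
 *	bwr.write_size   = sizeof(enter);
 *	for (;;) {
 *		bwr.read_buffer   = (binder_uintptr_t)rbuf;
 *		bwr.read_size     = sizeof(rbuf);
 *		bwr.read_consumed = 0;
 *		if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0 && errno != EINTR)
 *			break;
 *		bwr.write_size = 0;	-- BC_ENTER_LOOPER is sent only once
 *		-- dispatch the BR_* commands now sitting in rbuf
 *	}
 */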
#endif /* _UAPI_LINUX_BINDER_H */
/*
* Copyright (C) 2012 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM binder
#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _BINDER_TRACE_H
#include <linux/tracepoint.h>
struct binder_buffer;
struct binder_node;
struct binder_proc;
struct binder_ref;
struct binder_thread;
struct binder_transaction;
TRACE_EVENT(binder_ioctl,
TP_PROTO(unsigned int cmd, unsigned long arg),
TP_ARGS(cmd, arg),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(unsigned long, arg)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->arg = arg;
),
TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);
DECLARE_EVENT_CLASS(binder_lock_class,
TP_PROTO(const char *tag),
TP_ARGS(tag),
TP_STRUCT__entry(
__field(const char *, tag)
),
TP_fast_assign(
__entry->tag = tag;
),
TP_printk("tag=%s", __entry->tag)
);
#define DEFINE_BINDER_LOCK_EVENT(name) \
DEFINE_EVENT(binder_lock_class, name, \
TP_PROTO(const char *func), \
TP_ARGS(func))
DEFINE_BINDER_LOCK_EVENT(binder_lock);
DEFINE_BINDER_LOCK_EVENT(binder_locked);
DEFINE_BINDER_LOCK_EVENT(binder_unlock);
DECLARE_EVENT_CLASS(binder_function_return_class,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field(int, ret)
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("ret=%d", __entry->ret)
);
#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \
DEFINE_EVENT(binder_function_return_class, name, \
TP_PROTO(int ret), \
TP_ARGS(ret))
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),
TP_STRUCT__entry(
__field(bool, proc_work)
__field(bool, transaction_stack)
__field(bool, thread_todo)
),
TP_fast_assign(
__entry->proc_work = proc_work;
__entry->transaction_stack = transaction_stack;
__entry->thread_todo = thread_todo;
),
TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d",
__entry->proc_work, __entry->transaction_stack,
__entry->thread_todo)
);
TRACE_EVENT(binder_transaction,
TP_PROTO(bool reply, struct binder_transaction *t,
struct binder_node *target_node),
TP_ARGS(reply, t, target_node),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, target_node)
__field(int, to_proc)
__field(int, to_thread)
__field(int, reply)
__field(unsigned int, code)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->target_node = target_node ? target_node->debug_id : 0;
__entry->to_proc = t->to_proc->pid;
__entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
__entry->reply = reply;
__entry->code = t->code;
__entry->flags = t->flags;
),
TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
__entry->debug_id, __entry->target_node,
__entry->to_proc, __entry->to_thread,
__entry->reply, __entry->flags, __entry->code)
);
TRACE_EVENT(binder_transaction_received,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t),
TP_STRUCT__entry(
__field(int, debug_id)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
),
TP_printk("transaction=%d", __entry->debug_id)
);
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
struct binder_ref *ref),
TP_ARGS(t, node, ref),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
__field(binder_uintptr_t, node_ptr)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
(u64)__entry->node_ptr,
__entry->ref_debug_id, __entry->ref_desc)
);
TRACE_EVENT(binder_transaction_ref_to_node,
TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
TP_ARGS(t, ref),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
__field(int, node_debug_id)
__field(binder_uintptr_t, node_ptr)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
__entry->ref_debug_id, __entry->ref_desc,
(u64)__entry->node_ptr)
);
TRACE_EVENT(binder_transaction_ref_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
struct binder_ref *dest_ref),
TP_ARGS(t, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
__field(int, src_ref_debug_id)
__field(uint32_t, src_ref_desc)
__field(int, dest_ref_debug_id)
__field(uint32_t, dest_ref_desc)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->node_debug_id = src_ref->node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
__entry->dest_ref_desc = dest_ref->desc;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
__entry->src_ref_debug_id, __entry->src_ref_desc,
__entry->dest_ref_debug_id, __entry->dest_ref_desc)
);
TRACE_EVENT(binder_transaction_fd,
TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
TP_ARGS(t, src_fd, dest_fd),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, src_fd)
__field(int, dest_fd)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->src_fd = src_fd;
__entry->dest_fd = dest_fd;
),
TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
__entry->debug_id, __entry->src_fd, __entry->dest_fd)
);
DECLARE_EVENT_CLASS(binder_buffer_class,
TP_PROTO(struct binder_buffer *buf),
TP_ARGS(buf),
TP_STRUCT__entry(
__field(int, debug_id)
__field(size_t, data_size)
__field(size_t, offsets_size)
),
TP_fast_assign(
__entry->debug_id = buf->debug_id;
__entry->data_size = buf->data_size;
__entry->offsets_size = buf->offsets_size;
),
TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
__entry->debug_id, __entry->data_size, __entry->offsets_size)
);
DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_proc *proc, bool allocate,
void *start, void *end),
TP_ARGS(proc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
__field(size_t, offset)
__field(size_t, size)
),
TP_fast_assign(
__entry->proc = proc->pid;
__entry->allocate = allocate;
__entry->offset = start - proc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
__entry->proc, __entry->allocate,
__entry->offset, __entry->size)
);
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(uint32_t, cmd)
),
TP_fast_assign(
__entry->cmd = cmd;
),
TP_printk("cmd=0x%x %s",
__entry->cmd,
_IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
binder_command_strings[_IOC_NR(__entry->cmd)] :
"unknown")
);
TRACE_EVENT(binder_return,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(uint32_t, cmd)
),
TP_fast_assign(
__entry->cmd = cmd;
),
TP_printk("cmd=0x%x %s",
__entry->cmd,
_IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
binder_return_strings[_IOC_NR(__entry->cmd)] :
"unknown")
);
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE binder_trace
#include <trace/define_trace.h>
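/*
 * Once the module is loaded, the tracepoints defined above appear under
 * the "binder" system in tracefs, e.g. (mount point may vary by
 * distribution):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/binder/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */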
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "deps.h"
static struct vm_struct *(*get_vm_area_ptr)(unsigned long,
		unsigned long) = GET_VM_AREA;
static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long,
		unsigned long, struct zap_details *) = ZAP_PAGE_RANGE;
static int (*map_kernel_range_noflush_ptr)(unsigned long start,
		unsigned long size, pgprot_t prot,
		struct page **pages) = MAP_KERNEL_RANGE_NOFLUSH;
static void (*unmap_kernel_range_ptr)(unsigned long,
		unsigned long) = UNMAP_KERNEL_RANGE;
static struct files_struct *(*get_files_struct_ptr)(struct task_struct *) =
		GET_FILES_STRUCT;
static void (*put_files_struct_ptr)(struct files_struct *) =
		PUT_FILES_STRUCT;
static struct sighand_struct *(*__lock_task_sighand_ptr)(struct task_struct *,
		unsigned long *) = __LOCK_TASK_SIGHAND;
static int (*__alloc_fd_ptr)(struct files_struct *files, unsigned start,
		unsigned end, unsigned flags) = __ALLOC_FD;
static void (*__fd_install_ptr)(struct files_struct *files, unsigned int fd,
		struct file *file) = __FD_INSTALL;
static int (*__close_fd_ptr)(struct files_struct *files,
		unsigned int fd) = __CLOSE_FD;
static int (*can_nice_ptr)(const struct task_struct *,
		const int) = CAN_NICE;
static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) =
		SECURITY_BINDER_SET_CONTEXT_MGR;
static int (*security_binder_transaction_ptr)(struct task_struct *from,
		struct task_struct *to) = SECURITY_BINDER_TRANSACTION;
static int (*security_binder_transfer_binder_ptr)(struct task_struct *from,
		struct task_struct *to) = SECURITY_BINDER_TRANSFER_BINDER;
static int (*security_binder_transfer_file_ptr)(struct task_struct *from,
		struct task_struct *to,
		struct file *file) = SECURITY_BINDER_TRANSFER_FILE;
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
return get_vm_area_ptr(size, flags);
}
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details)
{
zap_page_range_ptr(vma, address, size, details);
}
int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages)
{
return map_kernel_range_noflush_ptr(start, size, prot, pages);
}
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unmap_kernel_range_ptr(addr, size);
}
struct files_struct *get_files_struct(struct task_struct *task)
{
return get_files_struct_ptr(task);
}
void put_files_struct(struct files_struct *files)
{
put_files_struct_ptr(files);
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
return __lock_task_sighand_ptr(tsk, flags);
}
int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags)
{
return __alloc_fd_ptr(files, start, end, flags);
}
void __fd_install(struct files_struct *files, unsigned int fd, struct file *file)
{
__fd_install_ptr(files, fd, file);
}
int __close_fd(struct files_struct *files, unsigned int fd)
{
return __close_fd_ptr(files, fd);
}
int can_nice(const struct task_struct *p, const int nice)
{
return can_nice_ptr(p, nice);
}
int security_binder_set_context_mgr(struct task_struct *mgr)
{
return security_binder_set_context_mgr_ptr(mgr);
}
int security_binder_transaction(struct task_struct *from, struct task_struct *to)
{
return security_binder_transaction_ptr(from, to);
}
int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
{
return security_binder_transfer_binder_ptr(from, to);
}
int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
{
return security_binder_transfer_file_ptr(from, to, file);
}
#!/bin/sh
SYMS="get_vm_area zap_page_range map_kernel_range_noflush unmap_kernel_range "\
"get_files_struct put_files_struct __lock_task_sighand "\
"__alloc_fd __fd_install __close_fd can_nice "\
"security_binder_set_context_mgr security_binder_transaction "\
"security_binder_transfer_binder security_binder_transfer_file"
for sym in $SYMS; do
	addr=$(grep -E "^[0-9a-f]+ T $sym\$" /proc/kallsyms | cut -d' ' -f1)
	if [ -z "$addr" ]; then
		echo "Error: can't find symbol $sym" >&2
		exit 1
	fi
	name=$(echo "$sym" | tr '[:lower:]' '[:upper:]')
	# printf is used because plain echo does not expand \t in every /bin/sh
	printf '#define %s\t(void *)0x%s\n' "$name" "$addr"
done
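# A generated deps.h consists of one line per symbol, e.g. (the address
# below is hypothetical; it changes with every kernel build and boot):
#
#	#define GET_VM_AREA	(void *)0xffffffff811e4a30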