struct kernel_param *kp)
{
int ret;
+
ret = param_set_int(val, kp);
if (binder_stop_on_user_error < 2)
wake_up(&binder_user_error_wait);
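
Every hunk in this patch is the same whitespace-only change: it adds the blank line that checkpatch.pl's "Missing a blank line after declarations" warning asks for, separating a block's local variable declarations from its first statement. A minimal illustration, not taken from the patch itself:

    /*
     * Illustrative only: the shape checkpatch.pl warns about, and the
     * fixed shape this patch applies across binder.c, ion.c and sync.c.
     */
    static int before(void)
    {
        int ret = 0;
        return ret;    /* WARNING: Missing a blank line after declarations */
    }

    static int after(void)
    {
        int ret = 0;

        return ret;    /* declarations now visually separated from code */
    }
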
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+
e = &log->entry[log->next];
memset(e, 0, sizeof(*e));
log->next++;
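
The log this hunk touches is a fixed-size ring: just past the context shown, binder_transaction_log_add() wraps log->next back to zero, so the newest entries overwrite the oldest. A compilable userspace sketch of the same scheme (type and field names invented for illustration):

    #include <string.h>

    #define LOG_SIZE 32

    struct log_entry { int debug_id; /* ... */ };

    struct ring_log {
        struct log_entry entry[LOG_SIZE];
        int next;    /* slot the next entry will use */
        int full;    /* set once the ring has wrapped */
    };

    /* Claim the next slot, zero it, and advance with wrap-around. */
    static struct log_entry *ring_log_add(struct ring_log *log)
    {
        struct log_entry *e = &log->entry[log->next];

        memset(e, 0, sizeof(*e));
        if (++log->next >= LOG_SIZE) {
            log->next = 0;
            log->full = 1;
        }
        return e;
    }
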
static void binder_set_nice(long nice)
{
long min_nice;
+
if (can_nice(current, nice)) {
set_user_nice(current, nice);
return;
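
binder_set_nice() continues past this context: when can_nice() rejects the requested value, it falls back to the most favourable nice level RLIMIT_NICE still permits. A userspace sketch of the same fallback, assuming the standard mapping in which the nice floor is 20 - rlim_cur (set_nice_clamped is a hypothetical name):

    #include <sys/resource.h>

    static void set_nice_clamped(int nice)
    {
        struct rlimit rl;

        if (setpriority(PRIO_PROCESS, 0, nice) == 0)
            return;    /* requested value was permitted */
        if (getrlimit(RLIMIT_NICE, &rl) != 0)
            return;
        /* RLIMIT_NICE stores 20 - nice, so invert the mapping. */
        setpriority(PRIO_PROCESS, 0, 20 - (int)rl.rlim_cur);
    }
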
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
struct page **page_array_ptr;
+
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
binder_insert_allocated_buffer(proc, buffer);
if (buffer_size != size) {
struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(proc, new_buffer);
if (!list_is_last(&buffer->entry, &proc->buffers)) {
struct binder_buffer *next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
+
if (next->free) {
rb_erase(&next->rb_node, &proc->free_buffers);
binder_delete_free_buffer(proc, next);
if (proc->buffers.next != &buffer->entry) {
struct binder_buffer *prev = list_entry(buffer->entry.prev,
struct binder_buffer, entry);
+
if (prev->free) {
binder_delete_free_buffer(proc, buffer);
rb_erase(&prev->rb_node, &proc->free_buffers);
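
Together with the buffer-split hunk above, these two hunks are binder's free-space bookkeeping: an oversized allocation carves its remainder into a new free buffer placed directly behind it, and a freed buffer merges with a free neighbour on either side. A self-contained sketch of split and coalesce on an address-ordered doubly linked list (types simplified; the real code additionally keeps free buffers in a size-keyed rbtree):

    #include <stddef.h>

    struct buf {
        struct buf *prev, *next;    /* address-ordered neighbours */
        size_t size;
        int free;
    };

    /* Split: the remainder of an oversized buffer becomes a new free
     * buffer linked immediately after it. 'rest' is caller-provided
     * storage standing in for the carved-out region. */
    static void buf_split(struct buf *b, size_t size, struct buf *rest)
    {
        rest->size = b->size - size;
        rest->free = 1;
        b->size = size;
        rest->next = b->next;
        rest->prev = b;
        if (b->next)
            b->next->prev = rest;
        b->next = rest;
    }

    /* Coalesce on free: absorb a free successor, then let a free
     * predecessor absorb us, mirroring the two hunks above. */
    static void buf_free(struct buf *b)
    {
        b->free = 1;
        if (b->next && b->next->free) {
            struct buf *n = b->next;

            b->size += n->size;
            b->next = n->next;
            if (n->next)
                n->next->prev = b;
        }
        if (b->prev && b->prev->free) {
            struct buf *p = b->prev;

            p->size += b->size;
            p->next = b->next;
            if (b->next)
                b->next->prev = p;
        }
    }
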
struct list_head *target_list)
{
int ret;
+
if (strong) {
if (ref->strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
ref->strong--;
if (ref->strong == 0) {
int ret;
+
ret = binder_dec_node(ref->node, strong, 1);
if (ret)
return ret;
uint32_t error_code)
{
struct binder_thread *target_thread;
+
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
target_thread = t->from;
off_end = (void *)offp + buffer->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
+
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
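
The condition above vets each offset in a transaction buffer three ways: the offset must leave room for a whole flat_binder_object, the buffer must be large enough to hold one at all, and the offset must be 32-bit aligned. The middle clause matters because data_size - sizeof(*fp) is unsigned arithmetic: for a buffer smaller than one object it would wrap to a huge value and the first test would pass. A compilable restatement that sidesteps the wrap by checking the size first:

    #include <stddef.h>
    #include <stdint.h>

    /* Analogue of the kernel's IS_ALIGNED() for power-of-two 'a'. */
    #define ALIGNED(x, a)    (((x) & ((a) - 1)) == 0)

    struct flat_object { uint32_t type; uint64_t handle_or_ptr; };

    static int offset_ok(size_t off, size_t data_size)
    {
        if (data_size < sizeof(struct flat_object))
            return 0;    /* no object fits at all */
        if (off > data_size - sizeof(struct flat_object))
            return 0;    /* object would run past the buffer */
        if (!ALIGNED(off, sizeof(uint32_t)))
            return 0;    /* misaligned offset */
        return 1;
    }
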
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_node *node = binder_get_node(proc, fp->binder);
+
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
+
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
}
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
+
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
+
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
+
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
if (node == NULL) {
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
(u64)ref->node->ptr);
} else {
struct binder_ref *new_ref;
+
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
{
struct binder_transaction_log_entry *fe;
+
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
}
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
+
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+
if (tmp_death->cookie == cookie) {
death = tmp_death;
break;
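
This lookup is the standard intrusive-list pattern: iterate the embedded binder_work nodes on delivered_death and use container_of() to step back out to the enclosing binder_ref_death before comparing cookies. A self-contained userspace rendering with stand-in types and a simplified singly linked list:

    #include <stddef.h>
    #include <stdint.h>

    /* container_of as the kernel defines it (simplified). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work {           /* stand-in for struct binder_work */
        struct work *next;
    };

    struct ref_death {      /* stand-in for struct binder_ref_death */
        struct work work;   /* embedded list node */
        uint64_t cookie;
    };

    static struct ref_death *find_death(struct work *head, uint64_t cookie)
    {
        struct work *w;

        for (w = head; w; w = w->next) {
            struct ref_death *d = container_of(w, struct ref_death, work);

            if (d->cookie == cookie)
                return d;
        }
        return NULL;
    }
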
const char *cmd_name;
int strong = node->internal_strong_refs || node->local_strong_refs;
int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+
if (weak && !node->has_weak_ref) {
cmd = BR_INCREFS;
cmd_name = "BR_INCREFS";
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
+
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
+
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
static void binder_release_work(struct list_head *list)
{
struct binder_work *w;
+
while (!list_empty(list)) {
w = list_first_entry(list, struct binder_work, entry);
list_del_init(&w->entry);
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
+
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
static void binder_vma_open(struct vm_area_struct *vma)
{
struct binder_proc *proc = vma->vm_private_data;
+
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
static void binder_vma_close(struct vm_area_struct *vma)
{
struct binder_proc *proc = vma->vm_private_data;
+
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
+
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
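
A sizing note on strbuf[11]: a 32-bit pid renders as at most ten decimal digits, and the eleventh byte holds the terminating NUL, so the snprintf() above can never truncate. The same bound spelled out:

    #include <stdio.h>
    #include <stdint.h>

    #define U32_DEC_DIGITS 10    /* UINT32_MAX is 4294967295 */

    int main(void)
    {
        char strbuf[U32_DEC_DIGITS + 1];    /* ten digits plus NUL */
        uint32_t pid = UINT32_MAX;

        snprintf(strbuf, sizeof(strbuf), "%u", pid);
        puts(strbuf);    /* prints 4294967295, exactly filling the buffer */
        return 0;
    }
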
{
struct rb_node *n;
int wake_count = 0;
+
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+
thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
static int binder_release(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc = filp->private_data;
+
debugfs_remove(proc->debugfs_entry);
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
struct files_struct *files;
int defer;
+
do {
binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
while (n) {
struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+
if (buffer < entry->buffer)
n = n->rb_left;
else if (buffer > entry->buffer)
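
The ion hunk sits inside a textbook rbtree descent keyed by buffer address. The full shape, as a kernel-idiom sketch following the search example in the kernel's Documentation/rbtree.txt (struct item is illustrative):

    #include <linux/rbtree.h>

    struct item {
        struct rb_node node;
        void *buffer;    /* search key: buffer address */
    };

    static struct item *item_search(struct rb_root *root, void *buffer)
    {
        struct rb_node *n = root->rb_node;

        while (n) {
            struct item *entry = rb_entry(n, struct item, node);

            if (buffer < entry->buffer)
                n = n->rb_left;      /* key sorts before this node */
            else if (buffer > entry->buffer)
                n = n->rb_right;     /* key sorts after this node */
            else
                return entry;        /* exact match */
        }
        return NULL;
    }
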
{
int serial = -1;
struct rb_node *node;
+
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
node);
+
if (strcmp(client->name, name))
continue;
serial = max(serial, client->display_serial);
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
+
ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
+
return buffer->vaddr + offset * PAGE_SIZE;
}
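
The kmap above is plain page arithmetic on ion's existing kernel mapping of the buffer: a page index becomes a byte offset from vaddr. As a standalone helper (PAGE_SIZE fixed at 4 KiB purely for illustration):

    #include <stdint.h>

    #define PAGE_SIZE 4096UL    /* illustrative; the kernel supplies this */

    /* Address of page 'page_index' within a contiguous mapping at vaddr. */
    static inline void *page_ptr(void *vaddr, unsigned long page_index)
    {
        return (uint8_t *)vaddr + page_index * PAGE_SIZE;
    }
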
case ION_IOC_IMPORT:
{
struct ion_handle *handle;
+
handle = ion_import_dma_buf(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
+
if (!size)
continue;
if (client->task) {
if (!debug_file) {
char buf[256], *path;
+
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
path, heap->name);
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
+
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
if (data->heaps[i].base == 0) {
phys_addr_t paddr;
+
paddr = memblock_alloc_base(data->heaps[i].size,
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);
list_for_each_safe(pos, n, &fence->pt_list_head) {
struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+
sync_timeline_remove_pt(pt);
}
}
list_for_each_safe(pos, n, &fence->pt_list_head) {
struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+
sync_pt_free(pt);
}
}
unsigned long arg)
{
struct sync_fence *fence = file->private_data;
+
switch (cmd) {
case SYNC_IOC_WAIT:
return sync_fence_ioctl_wait(fence, arg);
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
int status = pt->status;
+
seq_printf(s, " %s%spt %s",
fence ? pt->parent->name : "",
fence ? "_" : "",
sync_status_str(status));
if (pt->status) {
struct timeval tv = ktime_to_timeval(pt->timestamp);
+
seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
}
if (pt->parent->ops->timeline_value_str &&
pt->parent->ops->pt_value_str) {
char value[64];
+
pt->parent->ops->pt_value_str(pt, value, sizeof(value));
seq_printf(s, ": %s", value);
if (fence) {
if (obj->ops->timeline_value_str) {
char value[64];
+
obj->ops->timeline_value_str(obj, value, sizeof(value));
seq_printf(s, ": %s", value);
} else if (obj->ops->print_obj) {
for (i = 0; i < s.count; i += DUMP_CHUNK) {
if ((s.count - i) > DUMP_CHUNK) {
char c = s.buf[i + DUMP_CHUNK];
+
s.buf[i + DUMP_CHUNK] = 0;
pr_cont("%s", s.buf + i);
s.buf[i + DUMP_CHUNK] = c;
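
The closing hunk uses a small in-place trick to stream a large buffer out in bounded pieces: temporarily NUL-terminate each DUMP_CHUNK-sized slice, print it, then restore the clobbered byte. A compilable userspace equivalent (the kernel version prints via pr_cont() and NUL-terminates the tail instead of bounding it):

    #include <stdio.h>

    #define DUMP_CHUNK 256

    static void dump_chunked(char *buf, int count)
    {
        int i;

        for (i = 0; i < count; i += DUMP_CHUNK) {
            if (count - i > DUMP_CHUNK) {
                char c = buf[i + DUMP_CHUNK];

                buf[i + DUMP_CHUNK] = 0;      /* cap the slice in place */
                fputs(buf + i, stdout);
                buf[i + DUMP_CHUNK] = c;      /* restore the saved byte */
            } else {
                printf("%.*s", count - i, buf + i);    /* bounded tail */
            }
        }
    }
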