From 01a69402cf9d38ff180345d55c2ee51c7e89fbc7 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 20:50:03 +0200
Subject: Adding upstream version 6.8.9.

Signed-off-by: Daniel Baumann
---
 drivers/android/binder.c                |  31 +-
 drivers/android/binder_alloc.c          | 841 ++++++++++++++++----------
 drivers/android/binder_alloc.h          |  61 ++-
 drivers/android/binder_alloc_selftest.c |  18 +-
 drivers/android/binder_trace.h          |   2 +-
 drivers/android/binderfs.c              |   1 -
 6 files changed, 486 insertions(+), 468 deletions(-)

(limited to 'drivers/android')

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8460458ebe..d6f14c8e20 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
 	size_t object_size = 0;
 
 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (offset > buffer->data_size || read_size < sizeof(*hdr))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
+
 	if (u) {
 		if (copy_from_user(object, u + offset, read_size))
 			return 0;
@@ -1931,7 +1933,7 @@ static void binder_deferred_fd_close(int fd)
 	if (!twcb)
 		return;
 	init_task_work(&twcb->twork, binder_do_fd_close);
-	twcb->file = close_fd_get_file(fd);
+	twcb->file = file_close_fd(fd);
 	if (twcb->file) {
 		// pin it until binder_do_fd_close(); see comments there
 		get_file(twcb->file);
@@ -2087,9 +2089,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * Convert the address to an offset relative to
 			 * the base of the transaction buffer.
 			 */
-			fda_offset =
-			    (parent->buffer - (uintptr_t)buffer->user_data) +
-			    fda->parent_offset;
+			fda_offset = parent->buffer - buffer->user_data +
+				fda->parent_offset;
 			for (fd_index = 0; fd_index < fda->num_fds;
 			     fd_index++) {
 				u32 fd;
@@ -2607,7 +2608,7 @@ static int binder_translate_fd_array(struct list_head *pf_head,
 	 * Convert the address to an offset relative to
 	 * the base of the transaction buffer.
 	 */
-	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+	fda_offset = parent->buffer - t->buffer->user_data +
 		fda->parent_offset;
 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
 		fda->parent_offset;
@@ -2682,8 +2683,9 @@ static int binder_fixup_parent(struct list_head *pf_head,
 			  proc->pid, thread->pid);
 		return -EINVAL;
 	}
-	buffer_offset = bp->parent_offset +
-		(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+
+	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
+
 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
 }
 
@@ -3235,7 +3237,7 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 		tr->offsets_size, extra_buffers_size,
-		!reply && (t->flags & TF_ONE_WAY), current->tgid);
+		!reply && (t->flags & TF_ONE_WAY));
 	if (IS_ERR(t->buffer)) {
 		char *s;
 
@@ -3260,7 +3262,7 @@ static void binder_transaction(struct binder_proc *proc,
 			ALIGN(extra_buffers_size, sizeof(void *)) -
 			ALIGN(secctx_sz, sizeof(u64));
 
-		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+		t->security_ctx = t->buffer->user_data + buf_offset;
 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
 						  t->buffer, buf_offset,
 						  secctx, secctx_sz);
@@ -3537,8 +3539,7 @@ static void binder_transaction(struct binder_proc *proc,
 				goto err_translate_failed;
 			}
 			/* Fixup buffer pointer to target proc address space */
-			bp->buffer = (uintptr_t)
-				t->buffer->user_data + sg_buf_offset;
+			bp->buffer = t->buffer->user_data + sg_buf_offset;
 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
 
 			num_valid = (buffer_offset - off_start_offset) /
@@ -4708,7 +4709,7 @@ retry:
 	}
 	trd->data_size = t->buffer->data_size;
 	trd->offsets_size = t->buffer->offsets_size;
-	trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
+	trd->data.ptr.buffer = t->buffer->user_data;
 	trd->data.ptr.offsets = trd->data.ptr.buffer +
 				ALIGN(t->buffer->data_size,
 				      sizeof(void *));
@@ -5991,9 +5992,9 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 	}
 	if (buffer->target_node)
 		seq_printf(m, " node %d", buffer->target_node->debug_id);
-	seq_printf(m, " size %zd:%zd data %pK\n",
+	seq_printf(m, " size %zd:%zd offset %lx\n",
 		   buffer->data_size, buffer->offsets_size,
-		   buffer->user_data);
+		   proc->alloc.buffer - buffer->user_data);
 }
 
 static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a56cbfd9ba..e0e4dc38b6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -26,7 +26,7 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
-struct list_lru binder_alloc_lru;
+struct list_lru binder_freelist;
 
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
@@ -125,23 +125,20 @@ static void binder_insert_allocated_buffer_locked(
 
 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 		struct binder_alloc *alloc,
-		uintptr_t user_ptr)
+		unsigned long user_ptr)
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	void __user *uptr;
-
-	uptr = (void __user *)user_ptr;
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (uptr < buffer->user_data)
+		if (user_ptr < buffer->user_data) {
 			n = n->rb_left;
-		else if (uptr > buffer->user_data)
+		} else if (user_ptr > buffer->user_data) {
 			n = n->rb_right;
-		else {
+		} else {
 			/*
 			 * Guard against user threads attempting to
 			 * free the buffer when in use by kernel or
@@ -168,145 +165,168 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
  * Return:	Pointer to buffer or NULL
  */
 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
-						   uintptr_t user_ptr)
+						   unsigned long user_ptr)
 {
 	struct binder_buffer *buffer;
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	return buffer;
 }
 
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void __user *start, void __user *end)
+static inline void
+binder_set_installed_page(struct binder_lru_page *lru_page,
+			  struct page *page)
+{
+	/* Pairs with acquire in binder_get_installed_page() */
+	smp_store_release(&lru_page->page_ptr, page);
+}
+
+static inline struct page *
+binder_get_installed_page(struct binder_lru_page *lru_page)
+{
+	/* Pairs with release in binder_set_installed_page() */
+	return smp_load_acquire(&lru_page->page_ptr);
+}
+
+static void binder_lru_freelist_add(struct binder_alloc *alloc,
+				    unsigned long start, unsigned long end)
 {
-	void __user *page_addr;
-	unsigned long user_page_addr;
 	struct binder_lru_page *page;
-	struct vm_area_struct *vma = NULL;
-	struct mm_struct *mm = NULL;
-	bool need_mm = false;
+	unsigned long page_addr;
 
-	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %pK-%pK\n", alloc->pid,
-		     allocate ? "allocate" : "free", start, end);
+	trace_binder_update_page_range(alloc, false, start, end);
 
-	if (end <= start)
-		return 0;
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		size_t index;
+		int ret;
 
-	trace_binder_update_page_range(alloc, allocate, start, end);
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
 
-	if (allocate == 0)
-		goto free_range;
+		if (!binder_get_installed_page(page))
+			continue;
 
-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (!page->page_ptr) {
-			need_mm = true;
-			break;
-		}
+		trace_binder_free_lru_start(alloc, index);
+
+		ret = list_lru_add_obj(&binder_freelist, &page->lru);
+		WARN_ON(!ret);
+
+		trace_binder_free_lru_end(alloc, index);
 	}
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+				      struct binder_lru_page *lru_page,
+				      unsigned long addr)
+{
+	struct page *page;
+	int ret = 0;
 
-	if (need_mm && mmget_not_zero(alloc->mm))
-		mm = alloc->mm;
+	if (!mmget_not_zero(alloc->mm))
+		return -ESRCH;
 
-	if (mm) {
-		mmap_write_lock(mm);
-		vma = alloc->vma;
+	/*
+	 * Protected with mmap_sem in write mode as multiple tasks
+	 * might race to install the same page.
+	 */
+	mmap_write_lock(alloc->mm);
+	if (binder_get_installed_page(lru_page))
+		goto out;
+
+	if (!alloc->vma) {
+		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
+		ret = -ESRCH;
+		goto out;
 	}
 
-	if (!vma && need_mm) {
-		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-				   alloc->pid);
-		goto err_no_vma;
+	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+	if (!page) {
+		pr_err("%d: failed to allocate page\n", alloc->pid);
+		ret = -ENOMEM;
+		goto out;
 	}
 
-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+	ret = vm_insert_page(alloc->vma, addr, page);
+	if (ret) {
+		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
+		       alloc->pid, __func__, addr - alloc->buffer, ret);
+		__free_page(page);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Mark page installation complete and safe to use */
+	binder_set_installed_page(lru_page, page);
+out:
+	mmap_write_unlock(alloc->mm);
+	mmput_async(alloc->mm);
+	return ret;
+}
+
+static int binder_install_buffer_pages(struct binder_alloc *alloc,
+				       struct binder_buffer *buffer,
+				       size_t size)
+{
+	struct binder_lru_page *page;
+	unsigned long start, final;
+	unsigned long page_addr;
+
+	start = buffer->user_data & PAGE_MASK;
+	final = PAGE_ALIGN(buffer->user_data + size);
+
+	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
+		unsigned long index;
 		int ret;
-		bool on_lru;
-		size_t index;
 
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
-		if (page->page_ptr) {
-			trace_binder_alloc_lru_start(alloc, index);
-
-			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
-			WARN_ON(!on_lru);
-
-			trace_binder_alloc_lru_end(alloc, index);
+		if (binder_get_installed_page(page))
 			continue;
-		}
-
-		if (WARN_ON(!vma))
-			goto err_page_ptr_cleared;
 
 		trace_binder_alloc_page_start(alloc, index);
-		page->page_ptr = alloc_page(GFP_KERNEL |
-					    __GFP_HIGHMEM |
-					    __GFP_ZERO);
-		if (!page->page_ptr) {
-			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
-				alloc->pid, page_addr);
-			goto err_alloc_page_failed;
-		}
-		page->alloc = alloc;
-		INIT_LIST_HEAD(&page->lru);
-
-		user_page_addr = (uintptr_t)page_addr;
-		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-			       alloc->pid, user_page_addr);
-			goto err_vm_insert_page_failed;
-		}
 
-		if (index + 1 > alloc->pages_high)
-			alloc->pages_high = index + 1;
+		ret = binder_install_single_page(alloc, page, page_addr);
+		if (ret)
+			return ret;
 
 		trace_binder_alloc_page_end(alloc, index);
 	}
-	if (mm) {
-		mmap_write_unlock(mm);
-		mmput_async(mm);
-	}
+
 	return 0;
+}
 
-free_range:
-	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
-		bool ret;
-		size_t index;
+/* The range of pages should exclude those shared with other buffers */
+static void binder_lru_freelist_del(struct binder_alloc *alloc,
+				    unsigned long start, unsigned long end)
+{
+	struct binder_lru_page *page;
+	unsigned long page_addr;
+
+	trace_binder_update_page_range(alloc, true, start, end);
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		unsigned long index;
+		bool on_lru;
 
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
-		trace_binder_free_lru_start(alloc, index);
+		if (page->page_ptr) {
+			trace_binder_alloc_lru_start(alloc, index);
 
-		ret = list_lru_add(&binder_alloc_lru, &page->lru);
-		WARN_ON(!ret);
+			on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+			WARN_ON(!on_lru);
 
-		trace_binder_free_lru_end(alloc, index);
-		if (page_addr == start)
-			break;
-		continue;
-
-err_vm_insert_page_failed:
-		__free_page(page->page_ptr);
-		page->page_ptr = NULL;
-err_alloc_page_failed:
-err_page_ptr_cleared:
-		if (page_addr == start)
-			break;
-	}
-err_no_vma:
-	if (mm) {
-		mmap_write_unlock(mm);
-		mmput_async(mm);
+			trace_binder_alloc_lru_end(alloc, index);
+			continue;
+		}
+
+		if (index + 1 > alloc->pages_high)
+			alloc->pages_high = index + 1;
 	}
-	return vma ? -ENOMEM : -ESRCH;
 }
 
 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
@@ -323,7 +343,44 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 	return smp_load_acquire(&alloc->vma);
 }
 
-static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static void debug_no_space_locked(struct binder_alloc *alloc)
+{
+	size_t largest_alloc_size = 0;
+	struct binder_buffer *buffer;
+	size_t allocated_buffers = 0;
+	size_t largest_free_size = 0;
+	size_t total_alloc_size = 0;
+	size_t total_free_size = 0;
+	size_t free_buffers = 0;
+	size_t buffer_size;
+	struct rb_node *n;
+
+	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
+		allocated_buffers++;
+		total_alloc_size += buffer_size;
+		if (buffer_size > largest_alloc_size)
+			largest_alloc_size = buffer_size;
+	}
+
+	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
+		free_buffers++;
+		total_free_size += buffer_size;
+		if (buffer_size > largest_free_size)
+			largest_free_size = buffer_size;
+	}
+
+	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+			   total_alloc_size, allocated_buffers,
+			   largest_alloc_size, total_free_size,
+			   free_buffers, largest_free_size);
+}
+
+static bool debug_low_async_space_locked(struct binder_alloc *alloc)
 {
 	/*
 	 * Find the amount and size of buffers allocated by the current caller;
@@ -332,10 +389,20 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 	 * and at some point we'll catch them in the act. This is more efficient
 	 * than keeping a map per pid.
 	 */
-	struct rb_node *n;
 	struct binder_buffer *buffer;
 	size_t total_alloc_size = 0;
+	int pid = current->tgid;
 	size_t num_buffers = 0;
+	struct rb_node *n;
+
+	/*
+	 * Only start detecting spammers once we have less than 20% of async
+	 * space left (which is less than 10% of total buffer size).
+	 */
+	if (alloc->free_async_space >= alloc->buffer_size / 10) {
+		alloc->oneway_spam_detected = false;
+		return false;
+	}
 
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
 		 n = rb_next(n)) {
@@ -365,56 +432,26 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 	return false;
 }
 
+/* Callers preallocate @new_buffer, it is freed by this function if unused */
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
-				size_t data_size,
-				size_t offsets_size,
-				size_t extra_buffers_size,
-				int is_async,
-				int pid)
+				struct binder_buffer *new_buffer,
+				size_t size,
+				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
+	struct rb_node *best_fit = NULL;
 	struct binder_buffer *buffer;
+	unsigned long next_used_page;
+	unsigned long curr_last_page;
 	size_t buffer_size;
-	struct rb_node *best_fit = NULL;
-	void __user *has_page_addr;
-	void __user *end_page_addr;
-	size_t size, data_offsets_size;
-	int ret;
-
-	/* Check binder_alloc is fully initialized */
-	if (!binder_alloc_get_vma(alloc)) {
-		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-				   "%d: binder_alloc_buf, no vma\n",
-				   alloc->pid);
-		return ERR_PTR(-ESRCH);
-	}
-
-	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
-		ALIGN(offsets_size, sizeof(void *));
-
-	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
-		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				"%d: got transaction with invalid size %zd-%zd\n",
-				alloc->pid, data_size, offsets_size);
-		return ERR_PTR(-EINVAL);
-	}
-	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
-	if (size < data_offsets_size || size < extra_buffers_size) {
-		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				"%d: got transaction with invalid extra_buffers_size %zd\n",
-				alloc->pid, extra_buffers_size);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Pad 0-size buffers so they get assigned unique addresses */
-	size = max(size, sizeof(void *));
 
 	if (is_async && alloc->free_async_space < size) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
 			      alloc->pid, size);
-		return ERR_PTR(-ENOSPC);
+		buffer = ERR_PTR(-ENOSPC);
+		goto out;
 	}
 
 	while (n) {
@@ -425,121 +462,92 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		if (size < buffer_size) {
 			best_fit = n;
 			n = n->rb_left;
-		} else if (size > buffer_size)
+		} else if (size > buffer_size) {
 			n = n->rb_right;
-		else {
+		} else {
 			best_fit = n;
 			break;
 		}
 	}
-	if (best_fit == NULL) {
-		size_t allocated_buffers = 0;
-		size_t largest_alloc_size = 0;
-		size_t total_alloc_size = 0;
-		size_t free_buffers = 0;
-		size_t largest_free_size = 0;
-		size_t total_free_size = 0;
-
-		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
-		     n = rb_next(n)) {
-			buffer = rb_entry(n, struct binder_buffer, rb_node);
-			buffer_size = binder_alloc_buffer_size(alloc, buffer);
-			allocated_buffers++;
-			total_alloc_size += buffer_size;
-			if (buffer_size > largest_alloc_size)
-				largest_alloc_size = buffer_size;
-		}
-		for (n = rb_first(&alloc->free_buffers); n != NULL;
-		     n = rb_next(n)) {
-			buffer = rb_entry(n, struct binder_buffer, rb_node);
-			buffer_size = binder_alloc_buffer_size(alloc, buffer);
-			free_buffers++;
-			total_free_size += buffer_size;
-			if (buffer_size > largest_free_size)
-				largest_free_size = buffer_size;
-		}
+
+	if (unlikely(!best_fit)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf size %zd failed, no address space\n",
 				   alloc->pid, size);
-		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
"allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", - total_alloc_size, allocated_buffers, - largest_alloc_size, total_free_size, - free_buffers, largest_free_size); - return ERR_PTR(-ENOSPC); + debug_no_space_locked(alloc); + buffer = ERR_PTR(-ENOSPC); + goto out; } - if (n == NULL) { + + if (buffer_size != size) { + /* Found an oversized buffer and needs to be split */ buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_alloc_buffer_size(alloc, buffer); + + WARN_ON(n || buffer_size == size); + new_buffer->user_data = buffer->user_data + size; + list_add(&new_buffer->entry, &buffer->entry); + new_buffer->free = 1; + binder_insert_free_buffer(alloc, new_buffer); + new_buffer = NULL; } binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", alloc->pid, size, buffer, buffer_size); - has_page_addr = (void __user *) - (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); - WARN_ON(n && buffer_size != size); - end_page_addr = - (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); - if (end_page_addr > has_page_addr) - end_page_addr = has_page_addr; - ret = binder_update_page_range(alloc, 1, (void __user *) - PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); - if (ret) - return ERR_PTR(ret); - - if (buffer_size != size) { - struct binder_buffer *new_buffer; - - new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!new_buffer) { - pr_err("%s: %d failed to alloc new buffer struct\n", - __func__, alloc->pid); - goto err_alloc_buf_struct_failed; - } - new_buffer->user_data = (u8 __user *)buffer->user_data + size; - list_add(&new_buffer->entry, &buffer->entry); - new_buffer->free = 1; - binder_insert_free_buffer(alloc, new_buffer); - } + /* + * Now we remove the pages from the freelist. A clever calculation + * with buffer_size determines if the last page is shared with an + * adjacent in-use buffer. In such case, the page has been already + * removed from the freelist so we trim our range short. + */ + next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; + curr_last_page = PAGE_ALIGN(buffer->user_data + size); + binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), + min(next_used_page, curr_last_page)); - rb_erase(best_fit, &alloc->free_buffers); + rb_erase(&buffer->rb_node, &alloc->free_buffers); buffer->free = 0; buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got %pK\n", - alloc->pid, size, buffer); - buffer->data_size = data_size; - buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; - buffer->extra_buffers_size = extra_buffers_size; - buffer->pid = pid; buffer->oneway_spam_suspect = false; if (is_async) { alloc->free_async_space -= size; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); - if (alloc->free_async_space < alloc->buffer_size / 10) { - /* - * Start detecting spammers once we have less than 20% - * of async space left (which is less than 10% of total - * buffer size). 
-			 */
-			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
-		} else {
-			alloc->oneway_spam_detected = false;
-		}
+		if (debug_low_async_space_locked(alloc))
+			buffer->oneway_spam_suspect = true;
 	}
+
+out:
+	/* Discard possibly unused new_buffer */
+	kfree(new_buffer);
 	return buffer;
+}
 
-err_alloc_buf_struct_failed:
-	binder_update_page_range(alloc, 0, (void __user *)
-				 PAGE_ALIGN((uintptr_t)buffer->user_data),
-				 end_page_addr);
-	return ERR_PTR(-ENOMEM);
+/* Calculate the sanitized total size, returns 0 for invalid request */
+static inline size_t sanitized_size(size_t data_size,
+				    size_t offsets_size,
+				    size_t extra_buffers_size)
+{
+	size_t total, tmp;
+
+	/* Align to pointer size and check for overflows */
+	tmp = ALIGN(data_size, sizeof(void *)) +
+		ALIGN(offsets_size, sizeof(void *));
+	if (tmp < data_size || tmp < offsets_size)
+		return 0;
+	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
+	if (total < tmp || total < extra_buffers_size)
+		return 0;
+
+	/* Pad 0-sized buffers so they get a unique address */
+	total = max(total, sizeof(void *));
+
+	return total;
 }
 
 /**
@@ -549,7 +557,6 @@ err_alloc_buf_struct_failed:
  * @offsets_size:       user specified buffer offset
  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
  * @is_async:           buffer for async transaction
- * @pid:                pid to attribute allocation to (used for debugging)
  *
  * Allocate a new buffer given the requested sizes. Returns
  * the kernel version of the buffer pointer. The size allocated
@@ -562,74 +569,89 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
 					   size_t offsets_size,
 					   size_t extra_buffers_size,
-					   int is_async,
-					   int pid)
+					   int is_async)
 {
-	struct binder_buffer *buffer;
+	struct binder_buffer *buffer, *next;
+	size_t size;
+	int ret;
+
+	/* Check binder_alloc is fully initialized */
+	if (!binder_alloc_get_vma(alloc)) {
+		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+				   "%d: binder_alloc_buf, no vma\n",
+				   alloc->pid);
+		return ERR_PTR(-ESRCH);
+	}
+
+	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
+	if (unlikely(!size)) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
+				   alloc->pid, data_size, offsets_size,
+				   extra_buffers_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Preallocate the next buffer */
+	next = kzalloc(sizeof(*next), GFP_KERNEL);
+	if (!next)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock(&alloc->lock);
+	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
+	if (IS_ERR(buffer)) {
+		spin_unlock(&alloc->lock);
+		goto out;
+	}
+
+	buffer->data_size = data_size;
+	buffer->offsets_size = offsets_size;
+	buffer->extra_buffers_size = extra_buffers_size;
+	buffer->pid = current->tgid;
+	spin_unlock(&alloc->lock);
 
-	mutex_lock(&alloc->mutex);
-	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
-					     extra_buffers_size, is_async, pid);
-	mutex_unlock(&alloc->mutex);
+	ret = binder_install_buffer_pages(alloc, buffer, size);
+	if (ret) {
+		binder_alloc_free_buf(alloc, buffer);
+		buffer = ERR_PTR(ret);
+	}
+out:
 	return buffer;
 }
 
-static void __user *buffer_start_page(struct binder_buffer *buffer)
+static unsigned long buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
+	return buffer->user_data & PAGE_MASK;
 }
 
-static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
+static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void __user *)
-		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
+	return (buffer->user_data - 1) & PAGE_MASK;
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
-	struct binder_buffer *prev, *next = NULL;
-	bool to_free = true;
+	struct binder_buffer *prev, *next;
+
+	if (PAGE_ALIGNED(buffer->user_data))
+		goto skip_freelist;
 
 	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
-		to_free = false;
-		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				   "%d: merge free, buffer %pK share page with %pK\n",
-				   alloc->pid, buffer->user_data,
-				   prev->user_data);
-	}
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
+		goto skip_freelist;
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = binder_buffer_next(buffer);
-		if (buffer_start_page(next) == buffer_start_page(buffer)) {
-			to_free = false;
-			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-					   "%d: merge free, buffer %pK share page with %pK\n",
-					   alloc->pid,
-					   buffer->user_data,
-					   next->user_data);
-		}
-	}
-
-	if (PAGE_ALIGNED(buffer->user_data)) {
-		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				   "%d: merge free, buffer start %pK is page aligned\n",
-				   alloc->pid, buffer->user_data);
-		to_free = false;
+		if (buffer_start_page(next) == buffer_start_page(buffer))
+			goto skip_freelist;
 	}
 
-	if (to_free) {
-		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-				   alloc->pid, buffer->user_data,
-				   prev->user_data,
-				   next ? next->user_data : NULL);
-		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
-					 buffer_start_page(buffer) + PAGE_SIZE);
-	}
+	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
+				buffer_start_page(buffer) + PAGE_SIZE);
+skip_freelist:
 	list_del(&buffer->entry);
 	kfree(buffer);
 }
@@ -662,10 +684,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_update_page_range(alloc, 0,
-		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
-		(void __user *)(((uintptr_t)
-			  buffer->user_data + buffer_size) & PAGE_MASK));
+	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
+				(buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -689,8 +709,68 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	binder_insert_free_buffer(alloc, buffer);
 }
 
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible to ensure that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+					  struct binder_buffer *buffer,
+					  binder_size_t buffer_offset,
+					  pgoff_t *pgoffp)
+{
+	binder_size_t buffer_space_offset = buffer_offset +
+		(buffer->user_data - alloc->buffer);
+	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+	size_t index = buffer_space_offset >> PAGE_SHIFT;
+	struct binder_lru_page *lru_page;
+
+	lru_page = &alloc->pages[index];
+	*pgoffp = pgoff;
+	return lru_page->page_ptr;
+}
+
+/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
-				   struct binder_buffer *buffer);
+				   struct binder_buffer *buffer)
+{
+	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+	binder_size_t buffer_offset = 0;
+
+	while (bytes) {
+		unsigned long size;
+		struct page *page;
+		pgoff_t pgoff;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		memset_page(page, pgoff, 0, size);
+		bytes -= size;
+		buffer_offset += size;
+	}
+}
+
 /**
  * binder_alloc_free_buf() - free a binder buffer
  * @alloc: binder_alloc for this proc
@@ -705,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
 	 * binder_free_buf_locked(). However, that could
-	 * increase contention for the alloc mutex if clear_on_free
-	 * is used frequently for large buffers. The mutex is not
+	 * increase contention for the alloc->lock if clear_on_free
+	 * is used frequently for large buffers. This lock is not
 	 * needed for correctness here.
 	 */
 	if (buffer->clear_on_free) {
 		binder_alloc_clear_buf(alloc, buffer);
 		buffer->clear_on_free = false;
 	}
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	binder_free_buf_locked(alloc, buffer);
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 }
 
 /**
@@ -734,9 +814,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 			      struct vm_area_struct *vma)
 {
-	int ret;
-	const char *failure_string;
 	struct binder_buffer *buffer;
+	const char *failure_string;
+	int ret, i;
 
 	if (unlikely(vma->vm_mm != alloc->mm)) {
 		ret = -EINVAL;
@@ -754,7 +834,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 				   SZ_4M);
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-	alloc->buffer = (void __user *)vma->vm_start;
+	alloc->buffer = vma->vm_start;
 
 	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
@@ -765,6 +845,11 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_alloc_pages_failed;
 	}
 
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		alloc->pages[i].alloc = alloc;
+		INIT_LIST_HEAD(&alloc->pages[i].lru);
+	}
+
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
@@ -787,7 +872,7 @@ err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
-	alloc->buffer = NULL;
+	alloc->buffer = 0;
 	mutex_lock(&binder_alloc_mmap_lock);
 	alloc->buffer_size = 0;
 err_already_mapped:
@@ -808,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	struct binder_buffer *buffer;
 
 	buffers = 0;
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -840,25 +925,25 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			void __user *page_addr;
+			unsigned long page_addr;
 			bool on_lru;
 
 			if (!alloc->pages[i].page_ptr)
 				continue;
 
-			on_lru = list_lru_del(&binder_alloc_lru,
-					      &alloc->pages[i].lru);
+			on_lru = list_lru_del_obj(&binder_freelist,
+						  &alloc->pages[i].lru);
 			page_addr = alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %pK %s\n",
-				     __func__, alloc->pid, i, page_addr,
+				     "%s: %d: page %d %s\n",
+				     __func__, alloc->pid, i,
 				     on_lru ? "on lru" : "active");
 			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
 		kfree(alloc->pages);
 	}
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	if (alloc->mm)
 		mmdrop(alloc->mm);
 
@@ -867,16 +952,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		     __func__, alloc->pid, buffers, page_count);
 }
 
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
-				struct binder_buffer *buffer)
-{
-	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->user_data,
-		   buffer->data_size, buffer->offsets_size,
-		   buffer->extra_buffers_size,
-		   buffer->transaction ? "active" : "delivered");
"active" : "delivered"); -} - /** * binder_alloc_print_allocated() - print buffer info * @m: seq_file for output via seq_printf() @@ -888,13 +963,20 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix, void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc) { + struct binder_buffer *buffer; struct rb_node *n; - mutex_lock(&alloc->mutex); - for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) - print_binder_buffer(m, " buffer", - rb_entry(n, struct binder_buffer, rb_node)); - mutex_unlock(&alloc->mutex); + spin_lock(&alloc->lock); + for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", + buffer->debug_id, + buffer->user_data - alloc->buffer, + buffer->data_size, buffer->offsets_size, + buffer->extra_buffers_size, + buffer->transaction ? "active" : "delivered"); + } + spin_unlock(&alloc->lock); } /** @@ -911,7 +993,7 @@ void binder_alloc_print_pages(struct seq_file *m, int lru = 0; int free = 0; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); /* * Make sure the binder_alloc is fully initialized, otherwise we might * read inconsistent state. @@ -927,7 +1009,7 @@ void binder_alloc_print_pages(struct seq_file *m, lru++; } } - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } @@ -943,10 +1025,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) struct rb_node *n; int count = 0; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) count++; - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); return count; } @@ -979,35 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item, void *cb_arg) __must_hold(lock) { - struct mm_struct *mm = NULL; - struct binder_lru_page *page = container_of(item, - struct binder_lru_page, - lru); - struct binder_alloc *alloc; - uintptr_t page_addr; - size_t index; + struct binder_lru_page *page = container_of(item, typeof(*page), lru); + struct binder_alloc *alloc = page->alloc; + struct mm_struct *mm = alloc->mm; struct vm_area_struct *vma; + struct page *page_to_free; + unsigned long page_addr; + size_t index; - alloc = page->alloc; - if (!mutex_trylock(&alloc->mutex)) - goto err_get_alloc_mutex_failed; - + if (!mmget_not_zero(mm)) + goto err_mmget; + if (!mmap_read_trylock(mm)) + goto err_mmap_read_lock_failed; + if (!spin_trylock(&alloc->lock)) + goto err_get_alloc_lock_failed; if (!page->page_ptr) goto err_page_already_freed; index = page - alloc->pages; - page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; + page_addr = alloc->buffer + index * PAGE_SIZE; - mm = alloc->mm; - if (!mmget_not_zero(mm)) - goto err_mmget; - if (!mmap_read_trylock(mm)) - goto err_mmap_read_lock_failed; vma = vma_lookup(mm, page_addr); if (vma && vma != binder_alloc_get_vma(alloc)) goto err_invalid_vma; + trace_binder_unmap_kernel_start(alloc, index); + + page_to_free = page->page_ptr; + page->page_ptr = NULL; + + trace_binder_unmap_kernel_end(alloc, index); + list_lru_isolate(lru, item); + spin_unlock(&alloc->lock); spin_unlock(lock); if (vma) { @@ -1017,41 +1103,35 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } + mmap_read_unlock(mm); mmput_async(mm); - - 
-
-	__free_page(page->page_ptr);
-	page->page_ptr = NULL;
-
-	trace_binder_unmap_kernel_end(alloc, index);
+	__free_page(page_to_free);
 
 	spin_lock(lock);
-	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;
 
 err_invalid_vma:
+err_page_already_freed:
+	spin_unlock(&alloc->lock);
+err_get_alloc_lock_failed:
 	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
-err_page_already_freed:
-	mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
 	return LRU_SKIP;
 }
 
 static unsigned long
 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	return list_lru_count(&binder_alloc_lru);
+	return list_lru_count(&binder_freelist);
 }
 
 static unsigned long
 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
 			    NULL, sc->nr_to_scan);
 }
 
@@ -1069,7 +1149,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	alloc->pid = current->group_leader->pid;
 	alloc->mm = current->mm;
 	mmgrab(alloc->mm);
-	mutex_init(&alloc->mutex);
+	spin_lock_init(&alloc->lock);
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
@@ -1077,13 +1157,13 @@ int binder_alloc_shrinker_init(void)
 {
 	int ret;
 
-	ret = list_lru_init(&binder_alloc_lru);
+	ret = list_lru_init(&binder_freelist);
 	if (ret)
 		return ret;
 
 	binder_shrinker = shrinker_alloc(0, "android-binder");
 	if (!binder_shrinker) {
-		list_lru_destroy(&binder_alloc_lru);
+		list_lru_destroy(&binder_freelist);
 		return -ENOMEM;
 	}
 
@@ -1098,7 +1178,7 @@ int binder_alloc_shrinker_init(void)
 void binder_alloc_shrinker_exit(void)
 {
 	shrinker_free(binder_shrinker);
-	list_lru_destroy(&binder_alloc_lru);
+	list_lru_destroy(&binder_freelist);
 }
 
 /**
@@ -1133,68 +1213,6 @@ static inline bool check_buffer(struct binder_alloc *alloc,
 	       (!buffer->allow_user_free || !buffer->transaction);
 }
 
-/**
- * binder_alloc_get_page() - get kernel pointer for given buffer offset
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @pgoffp: address to copy final page offset to
- *
- * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->user_data. If @pgoffp is not
- * NULL, the byte-offset into the page is written there.
- *
- * The caller is responsible to ensure that the offset points
- * to a valid address within the @buffer and that @buffer is
- * not freeable by the user. Since it can't be freed, we are
- * guaranteed that the corresponding elements of @alloc->pages[]
- * cannot change.
- *
- * Return: struct page
- */
-static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
-					  struct binder_buffer *buffer,
-					  binder_size_t buffer_offset,
-					  pgoff_t *pgoffp)
-{
-	binder_size_t buffer_space_offset = buffer_offset +
-		(buffer->user_data - alloc->buffer);
-	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
-	size_t index = buffer_space_offset >> PAGE_SHIFT;
-	struct binder_lru_page *lru_page;
-
-	lru_page = &alloc->pages[index];
-	*pgoffp = pgoff;
-	return lru_page->page_ptr;
-}
-
-/**
- * binder_alloc_clear_buf() - zero out buffer
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be cleared
- *
- * memset the given buffer to 0
- */
-static void binder_alloc_clear_buf(struct binder_alloc *alloc,
-				   struct binder_buffer *buffer)
-{
-	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
-	binder_size_t buffer_offset = 0;
-
-	while (bytes) {
-		unsigned long size;
-		struct page *page;
-		pgoff_t pgoff;
-
-		page = binder_alloc_get_page(alloc, buffer,
-					     buffer_offset, &pgoff);
-		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-		memset_page(page, pgoff, 0, size);
-		bytes -= size;
-		buffer_offset += size;
-	}
-}
-
 /**
  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
  * @alloc: binder_alloc for this proc
@@ -1289,4 +1307,3 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
 	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
 					   dest, bytes);
 }
-
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index dc1e2b01dd..7038723447 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,13 +9,13 @@
 #include
 #include
 #include
-#include <linux/rtmutex.h>
+#include <linux/spinlock.h>
 #include
 #include
 #include
 #include
 
-extern struct list_lru binder_alloc_lru;
+extern struct list_lru binder_freelist;
 
 struct binder_transaction;
 
 /**
@@ -49,21 +49,19 @@ struct binder_buffer {
 	unsigned async_transaction:1;
 	unsigned oneway_spam_suspect:1;
 	unsigned debug_id:27;
-
 	struct binder_transaction *transaction;
-
 	struct binder_node *target_node;
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	void __user *user_data;
-	int pid;
+	unsigned long user_data;
+	int pid;
 };
 
 /**
  * struct binder_lru_page - page object used for binder shrinker
  * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_alloc_lru
+ * @lru: entry in binder_freelist
  * @alloc: binder_alloc for a proc
  */
 struct binder_lru_page {
@@ -74,7 +72,7 @@ struct binder_lru_page {
 
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
- * @mutex: protects binder_alloc fields
+ * @lock: protects binder_alloc fields
  * @vma: vm_area_struct passed to mmap_handler
  *       (invariant after mmap)
  * @mm: copy of task->mm (invariant after open)
@@ -98,10 +96,10 @@ struct binder_lru_page {
  * struct binder_buffer objects used to track the user buffers
  */
 struct binder_alloc {
-	struct mutex mutex;
+	spinlock_t lock;
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
-	void __user *buffer;
+	unsigned long buffer;
 	struct list_head buffers;
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
@@ -121,27 +119,26 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
 enum lru_status binder_alloc_free_page(struct list_head *item,
 				       struct list_lru_one *lru,
 				       spinlock_t *lock, void *cb_arg);
-extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
-						  size_t data_size,
-						  size_t offsets_size,
-						  size_t extra_buffers_size,
-						  int is_async,
-						  int pid);
-extern void binder_alloc_init(struct binder_alloc *alloc);
-extern int binder_alloc_shrinker_init(void);
-extern void binder_alloc_shrinker_exit(void);
-extern void binder_alloc_vma_close(struct binder_alloc *alloc);
-extern struct binder_buffer *
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+					   size_t data_size,
+					   size_t offsets_size,
+					   size_t extra_buffers_size,
+					   int is_async);
+void binder_alloc_init(struct binder_alloc *alloc);
+int binder_alloc_shrinker_init(void);
+void binder_alloc_shrinker_exit(void);
+void binder_alloc_vma_close(struct binder_alloc *alloc);
+struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
-			     uintptr_t user_ptr);
-extern void binder_alloc_free_buf(struct binder_alloc *alloc,
-				  struct binder_buffer *buffer);
-extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
-				     struct vm_area_struct *vma);
-extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
-extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
-extern void binder_alloc_print_allocated(struct seq_file *m,
-					 struct binder_alloc *alloc);
+			     unsigned long user_ptr);
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+			   struct binder_buffer *buffer);
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+			      struct vm_area_struct *vma);
+void binder_alloc_deferred_release(struct binder_alloc *alloc);
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+void binder_alloc_print_allocated(struct seq_file *m,
+				  struct binder_alloc *alloc);
 void binder_alloc_print_pages(struct seq_file *m,
 			      struct binder_alloc *alloc);
 
@@ -156,9 +153,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
 {
 	size_t free_async_space;
 
-	mutex_lock(&alloc->mutex);
+	spin_lock(&alloc->lock);
 	free_async_space = alloc->free_async_space;
-	mutex_unlock(&alloc->mutex);
+	spin_unlock(&alloc->lock);
 	return free_async_space;
 }
 
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c2b323bc3b..81442fe20a 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -72,6 +72,10 @@ enum buf_end_align_type {
 	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
 	 */
 	NEXT_NEXT_UNALIGNED,
+	/**
+	 * @LOOP_END: The number of enum values in &buf_end_align_type.
+	 * It is used for controlling loop termination.
+	 */
 	LOOP_END,
 };
 
@@ -93,11 +97,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 					 struct binder_buffer *buffer,
 					 size_t size)
 {
-	void __user *page_addr;
-	void __user *end;
+	unsigned long page_addr;
+	unsigned long end;
 	int page_index;
 
-	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	end = PAGE_ALIGN(buffer->user_data + size);
 	page_addr = buffer->user_data;
 	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
@@ -119,7 +123,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
 	int i;
 
 	for (i = 0; i < BUFFER_NUM; i++) {
-		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
 		if (IS_ERR(buffers[i]) ||
 		    !check_buffer_pages_allocated(alloc, buffers[i],
 						  sizes[i])) {
@@ -158,8 +162,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
 	int i;
 	unsigned long count;
 
-	while ((count = list_lru_count(&binder_alloc_lru))) {
-		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+	while ((count = list_lru_count(&binder_freelist))) {
+		list_lru_walk(&binder_freelist, binder_alloc_free_page,
 			      NULL, count);
 	}
 
@@ -183,7 +187,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
 
 	/* Allocate from lru. */
 	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	if (list_lru_count(&binder_alloc_lru))
+	if (list_lru_count(&binder_freelist))
 		pr_err("lru list should be empty but is not\n");
 
 	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 8cc07e6a42..fe38c6fc65 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -317,7 +317,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release,
 
 TRACE_EVENT(binder_update_page_range,
 	TP_PROTO(struct binder_alloc *alloc, bool allocate,
-		 void __user *start, void __user *end),
+		 unsigned long start, unsigned long end),
 	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 1224ab7aa0..3001d754ac 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -29,7 +29,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
--
cgit v1.2.3
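
A note for readers tracing the new page-install path in binder_alloc.c above:
binder_set_installed_page()/binder_get_installed_page() are a plain
release/acquire publication pair built on smp_store_release()/smp_load_acquire().
The following minimal userspace C11 sketch (hypothetical names and types, not
the driver's code) shows the same ordering guarantee:

#include <stdatomic.h>
#include <stdio.h>

struct page { int data; };

struct lru_page {
	_Atomic(struct page *) page_ptr; /* mirrors struct binder_lru_page */
};

/* Publisher: fully initialize the page, then release-store the pointer.
 * The release ordering makes the initialization visible before publication. */
static void set_installed_page(struct lru_page *lru, struct page *p)
{
	atomic_store_explicit(&lru->page_ptr, p, memory_order_release);
}

/* Consumer: the acquire-load pairs with the release-store, so a non-NULL
 * result guarantees the page contents are visible to this thread. */
static struct page *get_installed_page(struct lru_page *lru)
{
	return atomic_load_explicit(&lru->page_ptr, memory_order_acquire);
}

int main(void)
{
	static struct page p = { .data = 42 };
	struct lru_page slot = { .page_ptr = NULL };

	set_installed_page(&slot, &p);
	struct page *seen = get_installed_page(&slot);
	if (seen)
		printf("installed page data: %d\n", seen->data);
	return 0;
}

In the patch this pairing is what lets binder_install_buffer_pages() skip
already-installed pages without taking the mmap lock first.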
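
The sanitized_size() helper added above folds the old per-field overflow checks
into one place: each unsigned addition is validated by testing whether the
result wrapped below an operand. A standalone sketch of the same arithmetic,
assuming the usual power-of-two ALIGN() round-up (userspace toy, not the
driver's code):

#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((size_t)(a) - 1))

static size_t sanitized_size(size_t data_size, size_t offsets_size,
			     size_t extra_buffers_size)
{
	size_t total, tmp;

	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;	/* unsigned wraparound detected */
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* pad 0-sized requests so each buffer gets a unique address */
	return total < sizeof(void *) ? sizeof(void *) : total;
}

int main(void)
{
	printf("%zu\n", sanitized_size(10, 3, 0));         /* 24 on LP64 */
	printf("%zu\n", sanitized_size((size_t)-5, 8, 0)); /* 0: overflow */
	return 0;
}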
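
Finally, the freelist trim in binder_alloc_new_buf_locked() is worth a worked
example. Pages the new buffer shares with a neighboring in-use buffer were
never placed on the freelist, so the removal range is clipped with min(). A
userspace illustration with toy addresses, assuming 4096-byte pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long user_data   = 0x10000100; /* buffer start, not page aligned */
	unsigned long buffer_size = 0x3000;     /* size of the free chunk found */
	unsigned long size        = 0x1800;     /* size actually being allocated */

	/* first page fully owned by buffers that follow this one */
	unsigned long next_used_page = (user_data + buffer_size) & PAGE_MASK;
	/* last page the current allocation touches */
	unsigned long curr_last_page = PAGE_ALIGN(user_data + size);
	unsigned long start = PAGE_ALIGN(user_data);
	unsigned long end = next_used_page < curr_last_page ?
				next_used_page : curr_last_page;

	printf("remove pages [%#lx, %#lx) from freelist\n", start, end);
	return 0;
}

Here the leading partial page (0x10000100-0x10001000) stays on the freelist
because the previous buffer shares it; only [0x10001000, 0x10002000) is pulled
off, matching what binder_lru_freelist_del() receives in the patch.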