author    | Joel Fernandes <joelaf@google.com> | 2017-06-05 18:05:54 -0700
committer | Todd Kjos <tkjos@google.com>       | 2018-02-20 13:30:52 -0800
commit    | 319f8923b1eec17859ee28762913d795b2308b4c (patch)
tree      | 6aa2cec1b01e95d4e18a8d9ab4a5d4b8c6daee12
parent    | 204a81605d2675be663aa259d41b1a913f1818a6 (diff)
download  | common-319f8923b1eec17859ee28762913d795b2308b4c.tar.gz
ANDROID: binder: always allocate/map first BINDER_MIN_ALLOC pages
Certain use cases, such as camera, constantly allocate and free
binder buffers beyond the first 4k, resulting in mmap_sem contention.
If we expand the always-allocated range from 4k to something higher,
we can reduce that contention. Tests show that 6 pages are enough to
cause very few update_page_range operations and to reduce contention.
Bug: 36727951
Change-Id: I28bc3fb9b33c764c257e28487712fce2a3c1078b
Reported-by: Tim Murray <timmurray@google.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>
Pre-allocate 1 page instead of the 6 in the original patch: this
pre-allocated page is used to prevent the first page from getting
unpinned after the buffer headers are removed, rather than to pin
pages to speed up larger transactions. (A userspace sketch of the
resulting clamping logic follows the sign-offs below.)
Change-Id: Id027adcfd61b2d6b37f69a3f6009a068e90e84f0
Signed-off-by: Sherry Yang <sherryy@android.com>
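To make the clamp concrete, here is a minimal standalone C sketch of the behaviour the patch introduces. `struct alloc_model` and `update_page_range()` are hypothetical stand-ins for the kernel's `struct binder_alloc` and the new `binder_update_page_range()` wrapper; this is a userspace model, not the kernel code.

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE        4096
#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)	/* mirrors the patch */

/* Hypothetical stand-in for struct binder_alloc; only the base pointer matters. */
struct alloc_model {
	char *buffer;	/* start of the mmap'ed binder region */
};

/*
 * Models the new binder_update_page_range() wrapper: clamp 'start' so the
 * first BINDER_MIN_ALLOC bytes, eagerly mapped at mmap time, are never
 * allocated or freed again.
 */
static void update_page_range(struct alloc_model *alloc, char *start, char *end)
{
	if (start - alloc->buffer < BINDER_MIN_ALLOC)
		start = alloc->buffer + BINDER_MIN_ALLOC;

	if (start >= end)
		printf("  no page-table work: range lies in the pre-allocated area\n");
	else
		printf("  update pages at offsets [%td, %td)\n",
		       start - alloc->buffer, end - alloc->buffer);
}

int main(void)
{
	static char region[8 * PAGE_SIZE];
	struct alloc_model alloc = { .buffer = region };

	printf("buffer entirely in the first page:\n");
	update_page_range(&alloc, region, region + PAGE_SIZE);

	printf("buffer spanning pages 0-2 (only pages 1-2 are touched):\n");
	update_page_range(&alloc, region, region + 3 * PAGE_SIZE);

	printf("buffer beyond the pre-allocated range (unaffected by the clamp):\n");
	update_page_range(&alloc, region + 2 * PAGE_SIZE, region + 4 * PAGE_SIZE);
	return 0;
}
```

With BINDER_MIN_ALLOC set to one page, the first page stays mapped for the lifetime of the mapping, so frequent small transactions no longer trigger map/unmap cycles on it under mmap_sem.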
-rw-r--r-- | drivers/android/binder_alloc.c | 30
1 file changed, 27 insertions(+), 3 deletions(-)
```diff
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 0a541fee6784..f0f17799b5af 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -30,6 +30,8 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -182,9 +184,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 	return buffer;
 }
 
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static int __binder_update_page_range(struct binder_alloc *alloc, int allocate,
+				      void *start, void *end,
+				      struct vm_area_struct *vma)
 {
 	void *page_addr;
 	unsigned long user_page_addr;
@@ -284,6 +286,20 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	/*
+	 * For regular updates, move up start if needed since MIN_ALLOC pages
+	 * are always mapped
+	 */
+	if (start - alloc->buffer < BINDER_MIN_ALLOC)
+		start = alloc->buffer + BINDER_MIN_ALLOC;
+
+	return __binder_update_page_range(alloc, allocate, start, end, vma);
+}
+
 struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 						  size_t data_size,
 						  size_t offsets_size,
@@ -667,6 +683,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_alloc_buf_struct_failed;
 	}
 
+	if (__binder_update_page_range(alloc, 1, alloc->buffer,
+				       alloc->buffer + BINDER_MIN_ALLOC, vma)) {
+		ret = -ENOMEM;
+		failure_string = "alloc small buf";
+		goto err_alloc_small_buf_failed;
+	}
 	buffer->data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
@@ -678,6 +700,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
 	return 0;
 
+err_alloc_small_buf_failed:
+	kfree(buffer);
err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
```
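The new error label slots into the existing kernel-style goto unwind: `err_alloc_small_buf_failed` frees the buffer struct and then falls through to the older labels. Below is a rough userland model of that ordering; all names are hypothetical, and on success the kernel keeps the resources (they are freed here only so the model does not leak).

```c
#include <stdio.h>
#include <stdlib.h>

/*
 * Rough model of the unwind ordering in binder_alloc_mmap_handler():
 * each failure jumps to the label that undoes everything set up so far,
 * and labels fall through from newest resource to oldest.
 */
static int mmap_handler_model(int fail_small_buf)
{
	void *pages = NULL;
	void *buffer = NULL;
	int ret = 0;

	pages = calloc(8, sizeof(void *));	/* models alloc->pages */
	if (!pages) {
		ret = -1;
		goto err_alloc_pages_failed;
	}

	buffer = malloc(64);			/* models the first binder_buffer */
	if (!buffer) {
		ret = -1;
		goto err_alloc_buf_struct_failed;
	}

	if (fail_small_buf) {	/* models __binder_update_page_range() failing */
		ret = -1;
		goto err_alloc_small_buf_failed;
	}

	printf("setup succeeded\n");
	free(buffer);	/* kernel keeps these; freed here to avoid leaking */
	free(pages);
	return ret;

err_alloc_small_buf_failed:
	free(buffer);	/* newest resource first... */
err_alloc_buf_struct_failed:
	free(pages);	/* ...then fall through to the older ones */
err_alloc_pages_failed:
	printf("setup failed: everything unwound\n");
	return ret;
}

int main(void)
{
	mmap_handler_model(0);
	mmap_handler_model(1);
	return 0;
}
```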