author		Carlos Llamas <cmllamas@google.com>	2023-12-01 17:21:48 +0000
committer	Carlos Llamas <cmllamas@google.com>	2024-01-10 00:06:29 +0000
commit		bceb3d703c9f76e7ce466d1e189a050cfe0cf03c (patch)
tree		76afbea686a1fa872968f69ec24b5e6a027f829b
parent		12e9ee6fd1917d5331e8b508c177f0cfdd8e3d19 (diff)
download	common-bceb3d703c9f76e7ce466d1e189a050cfe0cf03c.tar.gz
BACKPORT: FROMGIT: binder: perform page installation outside of locks
Split out the insertion of pages to be outside of the alloc->mutex in
a separate binder_install_buffer_pages() routine. Since this is no
longer serialized, we must look at the full range of pages used by the
buffers. The installation is protected with mmap_sem in write mode
since multiple tasks might race to install the same page.

Besides avoiding unnecessary nested locking this helps in preparation
of switching the alloc->mutex into a spinlock_t in subsequent patches.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-20-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Bug: 254650075
(cherry picked from commit 37ebbb4f73a0d299fa0c7dd043932a2f5fbbb779
 https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git char-misc-next)
Change-Id: I7b0684310b8824194d7e4a51a1fd67944f8ec06a
[cmllamas: fixed conflicts due to missing e66b77e50522]
Signed-off-by: Carlos Llamas <cmllamas@google.com>
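The key synchronization idea is a publish/consume protocol on the per-page pointer: the installer publishes the fully set-up page with a release store, every reader checks for it with an acquire load, and racing installers are serialized by taking mmap_sem in write mode, so the loser of the race sees the winner's page and backs off. Below is a minimal userspace sketch of the same pattern, using C11 atomics and a pthread rwlock as stand-ins for smp_store_release()/smp_load_acquire() and mmap_sem; all names and the 4096-byte "page" are illustrative, not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Stand-in for struct binder_lru_page: page_ptr is NULL until installed. */
struct lru_page {
	_Atomic(void *) page_ptr;
};

/* Plays the role of mmap_sem taken in write mode. */
static pthread_rwlock_t install_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Pairs with the acquire load in get_installed_page() */
static void set_installed_page(struct lru_page *lru_page, void *page)
{
	atomic_store_explicit(&lru_page->page_ptr, page, memory_order_release);
}

/* Pairs with the release store in set_installed_page() */
static void *get_installed_page(struct lru_page *lru_page)
{
	return atomic_load_explicit(&lru_page->page_ptr, memory_order_acquire);
}

/*
 * Install a backing "page" unless another thread already did: the
 * write lock serializes racing installers, so the loser of the race
 * finds the page installed and turns into a cheap no-op.
 */
static int install_single_page(struct lru_page *lru_page)
{
	void *page;
	int ret = 0;

	pthread_rwlock_wrlock(&install_lock);
	if (get_installed_page(lru_page))
		goto out;		/* lost the race; page already usable */

	page = calloc(1, 4096);		/* stand-in for allocating a page */
	if (!page) {
		ret = -1;
		goto out;
	}

	/* Publish only after the page is fully set up. */
	set_installed_page(lru_page, page);
out:
	pthread_rwlock_unlock(&install_lock);
	return ret;
}

int main(void)
{
	struct lru_page p = { NULL };

	/* The second call observes the installed page and does nothing. */
	if (install_single_page(&p) || install_single_page(&p))
		return 1;
	free(get_installed_page(&p));
	return 0;
}

In the patch itself the unlocked acquire-load check lives in binder_install_buffer_pages(), which skips already-installed pages cheaply, while binder_install_single_page() re-checks under the lock to resolve the race.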
-rw-r--r--	drivers/android/binder_alloc.c	101
1 file changed, 73 insertions(+), 28 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b280e8875833..f1be5b974dfb 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -176,6 +176,21 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 	return buffer;
 }
 
+static inline void
+binder_set_installed_page(struct binder_lru_page *lru_page,
+			  struct page *page)
+{
+	/* Pairs with acquire in binder_get_installed_page() */
+	smp_store_release(&lru_page->page_ptr, page);
+}
+
+static inline struct page *
+binder_get_installed_page(struct binder_lru_page *lru_page)
+{
+	/* Pairs with release in binder_set_installed_page() */
+	return smp_load_acquire(&lru_page->page_ptr);
+}
+
 static void binder_free_page_range(struct binder_alloc *alloc,
 				   unsigned long start, unsigned long end)
 {
@@ -191,6 +206,9 @@ static void binder_free_page_range(struct binder_alloc *alloc,
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
+		if (!binder_get_installed_page(page))
+			continue;
+
 		trace_binder_free_lru_start(alloc, index);
 
 		ret = list_lru_add(&binder_alloc_lru, &page->lru);
@@ -210,7 +228,14 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 	if (!mmget_not_zero(alloc->vma_vm_mm))
 		return -ESRCH;
 
+	/*
+	 * Protected with mmap_sem in write mode as multiple tasks
+	 * might race to install the same page.
+	 */
 	mmap_write_lock(alloc->vma_vm_mm);
+	if (binder_get_installed_page(lru_page))
+		goto out;
+
 	if (!alloc->vma) {
 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
 		ret = -ESRCH;
@@ -233,15 +258,50 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 		goto out;
 	}
 
-	lru_page->page_ptr = page;
+	/* Mark page installation complete and safe to use */
+	binder_set_installed_page(lru_page, page);
 out:
 	mmap_write_unlock(alloc->vma_vm_mm);
 	mmput_async(alloc->vma_vm_mm);
 	return ret;
 }
 
-static int binder_allocate_page_range(struct binder_alloc *alloc,
-				      unsigned long start, unsigned long end)
+static int binder_install_buffer_pages(struct binder_alloc *alloc,
+				       struct binder_buffer *buffer,
+				       size_t size)
+{
+	struct binder_lru_page *page;
+	unsigned long start, final;
+	unsigned long page_addr;
+
+	start = buffer->user_data & PAGE_MASK;
+	final = PAGE_ALIGN(buffer->user_data + size);
+
+	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
+		unsigned long index;
+		int ret;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		if (binder_get_installed_page(page))
+			continue;
+
+		trace_binder_alloc_page_start(alloc, index);
+
+		ret = binder_install_single_page(alloc, page, page_addr);
+		if (ret)
+			return ret;
+
+		trace_binder_alloc_page_end(alloc, index);
+	}
+
+	return 0;
+}
+
+/* The range of pages should exclude those shared with other buffers */
+static void binder_allocate_page_range(struct binder_alloc *alloc,
+				       unsigned long start, unsigned long end)
 {
 	struct binder_lru_page *page;
 	unsigned long page_addr;
@@ -250,15 +310,11 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
 		     "%d: allocate pages %lx-%lx\n",
 		     alloc->pid, start, end);
 
-	if (end <= start)
-		return 0;
-
 	trace_binder_update_page_range(alloc, true, start, end);
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		unsigned long index;
 		bool on_lru;
-		int ret;
 
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
@@ -273,21 +329,9 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
 			continue;
 		}
 
-		trace_binder_alloc_page_start(alloc, index);
-
-		ret = binder_install_single_page(alloc, page, page_addr);
-		if (ret) {
-			binder_free_page_range(alloc, start, page_addr);
-			return ret;
-		}
-
 		if (index + 1 > alloc->pages_high)
 			alloc->pages_high = index + 1;
-
-		trace_binder_alloc_page_end(alloc, index);
 	}
-
-	return 0;
 }
@@ -421,7 +465,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	unsigned long has_page_addr;
 	unsigned long end_page_addr;
 	size_t buffer_size;
-	int ret;
 
 	trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
@@ -467,18 +510,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
 	WARN_ON(n && buffer_size != size);
+
+	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
-					 end_page_addr);
-	if (ret) {
-		buffer = ERR_PTR(ret);
-		goto out;
-	}
-
+	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+				   end_page_addr);
 	if (buffer_size != size) {
 		new_buffer->user_data = buffer->user_data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
@@ -556,6 +595,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 {
 	struct binder_buffer *buffer, *next;
 	size_t size;
+	int ret;
 
 	/* Check binder_alloc is fully initialized */
 	if (!binder_alloc_get_vma(alloc)) {
@@ -592,6 +632,11 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	buffer->pid = current->tgid;
 	mutex_unlock(&alloc->mutex);
 
+	ret = binder_install_buffer_pages(alloc, buffer, size);
+	if (ret) {
+		binder_alloc_free_buf(alloc, buffer);
+		buffer = ERR_PTR(ret);
+	}
 out:
 	return buffer;
 }
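Net effect on the allocation path: alloc->mutex now covers only the buffer bookkeeping in binder_alloc_new_buf_locked(), while the page installation, which can sleep and takes mmap_sem, runs after the unlock, and the buffer is freed again if installation fails. A schematic of the resulting flow in binder_alloc_new_buf(), condensed from the hunks above (not literal kernel source):

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(...);	/* reserve the range; no page installs */
	mutex_unlock(&alloc->mutex);

	ret = binder_install_buffer_pages(alloc, buffer, size);	/* may sleep; takes mmap_sem */
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);	/* roll back the reservation */
		buffer = ERR_PTR(ret);
	}
	return buffer;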