author      Alex Hong <rurumihong@google.com>   2021-09-27 21:30:38 +0800
committer   Alex Hong <rurumihong@google.com>   2021-09-28 16:26:28 +0800
commit      d9336e7bc532fd6e18d3897e0d5cc18036ccf1e1 (patch)
tree        3076b4ed92d230c2e54973b16e1a9279205e0e13
parent      6a00c026fbe10d86439979cde115451c87b8b311 (diff)
parent      d6841c566826a448ad9629b48d6898acd80fd2a8 (diff)
download    gpu-d9336e7bc532fd6e18d3897e0d5cc18036ccf1e1.tar.gz
Merge android12-gs-pixel-5.10-sc-v2 into android13-gs-pixel-5.10-gs101
Change-Id: Ib3e2c923bbba2dc61aa0e9e77d76bbc416a092e6
Signed-off-by: Alex Hong <rurumihong@google.com>
-rw-r--r--  mali_kbase/mali_kbase_defs.h     |  4
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.c  | 49
2 files changed, 26 insertions(+), 27 deletions(-)
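The functional change carried by this merge is the conversion of the per-context MMU table lock from struct mutex to struct rt_mutex, together with the <linux/rtmutex.h> include that the new field type needs. The sketch below illustrates the resulting locking pattern. It is not driver code: the kbase_mmu_table_like type and the example_* helpers are hypothetical stand-ins, and the stated motivation (priority inheritance for waiters on mmu_lock) is an assumption, since the commit message does not spell one out.

#include <linux/rtmutex.h>

/* Hypothetical stand-in for struct kbase_mmu_table; only the lock field
 * mirrors the driver. An rt_mutex behaves like a mutex but applies
 * priority inheritance, so a low-priority holder of mmu_lock is boosted
 * while a higher-priority task waits on it.
 */
struct kbase_mmu_table_like {
	struct rt_mutex mmu_lock;	/* was: struct mutex mmu_lock */
	/* ... remaining kbase_mmu_table fields elided ... */
};

static void example_init(struct kbase_mmu_table_like *mmut)
{
	rt_mutex_init(&mmut->mmu_lock);		/* was: mutex_init() */
}

static void example_critical_section(struct kbase_mmu_table_like *mmut)
{
	rt_mutex_lock(&mmut->mmu_lock);		/* was: mutex_lock() */
	/* ... walk or update page tables under the lock ... */
	rt_mutex_unlock(&mmut->mmu_lock);	/* was: mutex_unlock() */
}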
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index 164dded..dbdd9ad 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -47,7 +47,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sizes.h>
-
+#include <linux/rtmutex.h>
#if defined(CONFIG_SYNC)
#include <sync.h>
@@ -289,7 +289,7 @@ struct kbase_fault {
*/
struct kbase_mmu_table {
u64 *mmu_teardown_pages;
- struct mutex mmu_lock;
+ struct rt_mutex mmu_lock;
phys_addr_t pgd;
u8 group_id;
struct kbase_context *kctx;
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index e3c5b15..d296956 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -1226,7 +1226,7 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
if (nr == 0)
return 0;
- mutex_lock(&kctx->mmu.mmu_lock);
+ rt_mutex_lock(&kctx->mmu.mmu_lock);
while (remain) {
unsigned int i;
@@ -1252,7 +1252,7 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
/* Fill the memory pool with enough pages for
* the page walk to succeed
*/
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
err = kbase_mem_pool_grow(
#ifdef CONFIG_MALI_2MB_ALLOC
&kbdev->mem_pools.large[
@@ -1261,7 +1261,7 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
#endif
kctx->mmu.group_id],
MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu.mmu_lock);
+ rt_mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
if (err) {
dev_warn(kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
@@ -1319,12 +1319,12 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
recover_required = true;
recover_count += count;
}
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
kbase_mmu_flush_invalidate(kctx, start_vpfn, nr, false);
return 0;
fail_unlock:
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
kbase_mmu_flush_invalidate(kctx, start_vpfn, nr, false);
return err;
}
@@ -1391,7 +1391,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
if (nr == 0)
return 0;
- mutex_lock(&mmut->mmu_lock);
+ rt_mutex_lock(&mmut->mmu_lock);
while (remain) {
unsigned int i;
@@ -1423,7 +1423,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
/* Fill the memory pool with enough pages for
* the page walk to succeed
*/
- mutex_unlock(&mmut->mmu_lock);
+ rt_mutex_unlock(&mmut->mmu_lock);
err = kbase_mem_pool_grow(
#ifdef CONFIG_MALI_2MB_ALLOC
&kbdev->mem_pools.large[mmut->group_id],
@@ -1431,7 +1431,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
&kbdev->mem_pools.small[mmut->group_id],
#endif
cur_level);
- mutex_lock(&mmut->mmu_lock);
+ rt_mutex_lock(&mmut->mmu_lock);
} while (!err);
if (err) {
@@ -1504,7 +1504,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
err = 0;
fail_unlock:
- mutex_unlock(&mmut->mmu_lock);
+ rt_mutex_unlock(&mmut->mmu_lock);
return err;
}
@@ -1746,7 +1746,7 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
return 0;
}
- mutex_lock(&mmut->mmu_lock);
+ rt_mutex_lock(&mmut->mmu_lock);
mmu_mode = kbdev->mmu_mode;
@@ -1844,7 +1844,7 @@ next:
}
err = 0;
out:
- mutex_unlock(&mmut->mmu_lock);
+ rt_mutex_unlock(&mmut->mmu_lock);
if (mmut->kctx)
kbase_mmu_flush_invalidate(mmut->kctx, start_vpfn, requested_nr,
@@ -1894,7 +1894,7 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
if (nr == 0)
return 0;
- mutex_lock(&kctx->mmu.mmu_lock);
+ rt_mutex_lock(&kctx->mmu.mmu_lock);
kbdev = kctx->kbdev;
@@ -1915,7 +1915,7 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
/* Fill the memory pool with enough pages for
* the page walk to succeed
*/
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
err = kbase_mem_pool_grow(
#ifdef CONFIG_MALI_2MB_ALLOC
&kbdev->mem_pools.large[
@@ -1924,7 +1924,7 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
#endif
kctx->mmu.group_id],
MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu.mmu_lock);
+ rt_mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
if (err) {
dev_warn(kbdev->dev,
@@ -1956,11 +1956,11 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
kunmap(pfn_to_page(PFN_DOWN(pgd)));
}
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
return 0;
fail_unlock:
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
return err;
}
@@ -2045,7 +2045,7 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
return -EINVAL;
mmut->group_id = group_id;
- mutex_init(&mmut->mmu_lock);
+ rt_mutex_init(&mmut->mmu_lock);
mmut->kctx = kctx;
/* Preallocate MMU depth of four pages for mmu_teardown_level to use */
@@ -2074,9 +2074,9 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
return -ENOMEM;
}
- mutex_lock(&mmut->mmu_lock);
+ rt_mutex_lock(&mmut->mmu_lock);
mmut->pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
- mutex_unlock(&mmut->mmu_lock);
+ rt_mutex_unlock(&mmut->mmu_lock);
}
return 0;
@@ -2085,17 +2085,16 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
{
if (mmut->pgd) {
- mutex_lock(&mmut->mmu_lock);
+ rt_mutex_lock(&mmut->mmu_lock);
mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL,
mmut->mmu_teardown_pages);
- mutex_unlock(&mmut->mmu_lock);
+ rt_mutex_unlock(&mmut->mmu_lock);
if (mmut->kctx)
KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, mmut->kctx->id, 0);
}
kfree(mmut->mmu_teardown_pages);
- mutex_destroy(&mmut->mmu_lock);
}
void kbase_mmu_as_term(struct kbase_device *kbdev, int i)
@@ -2185,7 +2184,7 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
return NULL;
kaddr = vmalloc_user(size_left);
- mutex_lock(&kctx->mmu.mmu_lock);
+ rt_mutex_lock(&kctx->mmu.mmu_lock);
if (kaddr) {
u64 end_marker = 0xFFULL;
@@ -2233,12 +2232,12 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
}
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
return kaddr;
fail_free:
vfree(kaddr);
- mutex_unlock(&kctx->mmu.mmu_lock);
+ rt_mutex_unlock(&kctx->mmu.mmu_lock);
return NULL;
}
KBASE_EXPORT_TEST_API(kbase_mmu_dump);
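Two observations on the converted sites, offered as a reading aid rather than as part of the patch. First, kbase_mmu_term() drops the mutex_destroy(&mmut->mmu_lock) call instead of converting it; mutex_destroy() is a debug-time aid for struct mutex, and no replacement is added on the rt_mutex path. Second, every page-walk site keeps its existing retry shape: the MMU lock is released before kbase_mem_pool_grow(), which may sleep, then re-taken and the walk retried. A minimal sketch of that shape follows, with try_page_walk() and grow_memory_pool() as hypothetical stand-ins for the driver's pgd lookup and kbase_mem_pool_grow().

#include <linux/errno.h>
#include <linux/rtmutex.h>

/* Placeholder helpers: the real driver walks the page tables and grows a
 * kbase_mem_pool; these stubs only keep the sketch self-contained.
 */
static int try_page_walk(void)		{ return 0; }
static int grow_memory_pool(void)	{ return 0; }

static int walk_with_pool_grow(struct rt_mutex *mmu_lock)
{
	int err;

	rt_mutex_lock(mmu_lock);
	do {
		err = try_page_walk();
		if (err != -ENOMEM)
			break;
		/* Drop the MMU lock across the pool grow, which may sleep,
		 * as the hunks above do around kbase_mem_pool_grow().
		 */
		rt_mutex_unlock(mmu_lock);
		err = grow_memory_pool();
		rt_mutex_lock(mmu_lock);
	} while (!err);
	rt_mutex_unlock(mmu_lock);

	return err;
}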