Diffstat (limited to 'mali_kbase/context/mali_kbase_context.c')
-rw-r--r--  mali_kbase/context/mali_kbase_context.c  82
1 file changed, 30 insertions(+), 52 deletions(-)
diff --git a/mali_kbase/context/mali_kbase_context.c b/mali_kbase/context/mali_kbase_context.c
index 0164e74..c9a0cb9 100644
--- a/mali_kbase/context/mali_kbase_context.c
+++ b/mali_kbase/context/mali_kbase_context.c
@@ -25,12 +25,16 @@
#include <linux/version.h>
#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
#include <linux/sched/task.h>
+#endif
+
+#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
#else
#include <linux/sched.h>
#endif
#include <mali_kbase.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem_pool_group.h>
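
/*
 * Aside: KERNEL_VERSION(a, b, c) expands to a single integer, so the
 * gates above are plain preprocessor comparisons against
 * LINUX_VERSION_CODE. A minimal sketch of the pattern the reworked
 * includes follow (same version cutoff as above):
 */
#include <linux/version.h>

#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
#include <linux/sched/signal.h>	/* declarations split out of sched.h */
#else
#include <linux/sched.h>	/* older kernels keep them in sched.h */
#endif
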
@@ -53,8 +57,7 @@ static struct kbase_process *find_process_node(struct rb_node *node, pid_t tgid)
/* Check if the kctx creation request is from an existing process. */
while (node) {
- struct kbase_process *prcs_node =
- rb_entry(node, struct kbase_process, kprcs_node);
+ struct kbase_process *prcs_node = rb_entry(node, struct kbase_process, kprcs_node);
if (prcs_node->tgid == tgid) {
kprcs = prcs_node;
break;
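
/*
 * Aside: rb_entry() is container_of() for red-black tree nodes, so the
 * loop above is the stock rbtree search. A self-contained sketch of the
 * same walk (names mirror the hunk; the descent lines sit outside the
 * diff context and are assumed here):
 */
static struct kbase_process *find_process_sketch(struct rb_node *node, pid_t tgid)
{
	while (node) {
		struct kbase_process *p = rb_entry(node, struct kbase_process, kprcs_node);

		if (p->tgid == tgid)
			return p;
		node = (tgid < p->tgid) ? node->rb_left : node->rb_right;
	}
	return NULL;	/* no context from this tgid yet */
}
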
@@ -110,8 +113,7 @@ static int kbase_insert_kctx_to_process(struct kbase_context *kctx)
struct kbase_process *prcs_node;
parent = *new;
- prcs_node = rb_entry(parent, struct kbase_process,
- kprcs_node);
+ prcs_node = rb_entry(parent, struct kbase_process, kprcs_node);
if (tgid < prcs_node->tgid)
new = &(*new)->rb_left;
else
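
/*
 * Aside: the descent above is the first half of the standard rbtree
 * insertion idiom; the second half, outside this hunk, links the new
 * node into the slot found and rebalances. A hedged sketch (the
 * process_root field name is assumed, not shown in this diff):
 */
rb_link_node(&kprcs->kprcs_node, parent, new);	/* attach at the free slot */
rb_insert_color(&kprcs->kprcs_node, &kbdev->process_root);	/* rebalance */
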
@@ -135,10 +137,6 @@ int kbase_context_common_init(struct kbase_context *kctx)
/* creating a context is considered a disjoint event */
kbase_disjoint_event(kctx->kbdev);
- kctx->process_mm = NULL;
- kctx->task = NULL;
- atomic_set(&kctx->nonmapped_pages, 0);
- atomic_set(&kctx->permanent_mapped_pages, 0);
kctx->tgid = current->tgid;
kctx->pid = current->pid;
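
/*
 * Aside: dropping the explicit NULL/atomic_set(..., 0) initialisers is
 * consistent with the context arriving pre-zeroed from its allocator; a
 * zeroed atomic_t already reads as 0. A hedged illustration (vzalloc is
 * an assumption, the allocation site is not part of this diff):
 */
struct kbase_context *kctx = vzalloc(sizeof(*kctx));
/* kctx->process_mm == NULL and atomic_read(&kctx->nonmapped_pages) == 0
 * now hold without any explicit stores. */
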
@@ -147,7 +145,7 @@ int kbase_context_common_init(struct kbase_context *kctx)
struct pid *pid_struct;
rcu_read_lock();
- pid_struct = find_get_pid(kctx->tgid);
+ pid_struct = get_pid(task_tgid(current));
if (likely(pid_struct)) {
struct task_struct *task = pid_task(pid_struct, PIDTYPE_PID);
@@ -158,16 +156,14 @@ int kbase_context_common_init(struct kbase_context *kctx)
get_task_struct(task);
kctx->task = task;
} else {
- dev_err(kctx->kbdev->dev,
- "Failed to get task pointer for %s/%d",
+ dev_err(kctx->kbdev->dev, "Failed to get task pointer for %s/%d",
current->comm, current->pid);
err = -ESRCH;
}
put_pid(pid_struct);
} else {
- dev_err(kctx->kbdev->dev,
- "Failed to get pid pointer for %s/%d",
+ dev_err(kctx->kbdev->dev, "Failed to get pid pointer for %s/%d",
current->comm, current->pid);
err = -ESRCH;
}
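
/*
 * Aside: find_get_pid(kctx->tgid) re-resolves a numeric tgid through the
 * pid namespace, which can in principle name the wrong process if the id
 * was recycled; get_pid(task_tgid(current)) takes a reference on
 * current's own struct pid directly. The surrounding reference pattern,
 * condensed into a sketch:
 */
struct pid *pid_struct;
struct task_struct *task;

rcu_read_lock();
pid_struct = get_pid(task_tgid(current));	/* +1 on current's struct pid */
task = pid_task(pid_struct, PIDTYPE_PID);	/* lookup only, no ref taken */
if (task)
	get_task_struct(task);	/* +1 on the task, kept in kctx->task */
put_pid(pid_struct);	/* pid reference no longer needed */
rcu_read_unlock();
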
@@ -180,8 +176,6 @@ int kbase_context_common_init(struct kbase_context *kctx)
kctx->process_mm = current->mm;
}
- atomic_set(&kctx->used_pages, 0);
-
mutex_init(&kctx->reg_lock);
spin_lock_init(&kctx->mem_partials_lock);
@@ -190,33 +184,17 @@ int kbase_context_common_init(struct kbase_context *kctx)
spin_lock_init(&kctx->waiting_soft_jobs_lock);
INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
- init_waitqueue_head(&kctx->event_queue);
- atomic_set(&kctx->event_count, 0);
-
-#if !MALI_USE_CSF
- atomic_set(&kctx->event_closed, false);
-#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
- atomic_set(&kctx->jctx.work_id, 0);
-#endif
-#endif
-
-#if MALI_USE_CSF
- atomic64_set(&kctx->num_fixable_allocs, 0);
- atomic64_set(&kctx->num_fixed_allocs, 0);
-#endif
-
kbase_gpu_vm_lock(kctx);
bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
kbase_gpu_vm_unlock(kctx);
- kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
+ kctx->id = (u32)atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
mutex_lock(&kctx->kbdev->kctx_list_lock);
err = kbase_insert_kctx_to_process(kctx);
mutex_unlock(&kctx->kbdev->kctx_list_lock);
if (err) {
- dev_err(kctx->kbdev->dev,
- "(err:%d) failed to insert kctx to kbase_process", err);
+ dev_err(kctx->kbdev->dev, "(err:%d) failed to insert kctx to kbase_process", err);
if (likely(kctx->kfile)) {
mmdrop(kctx->process_mm);
put_task_struct(kctx->task);
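
/*
 * Aside: the id assignment above is a lock-free fetch-and-increment:
 * atomic_add_return(1, ctr) returns the post-increment value, so
 * subtracting 1 yields the old value and every context gets a unique,
 * monotonically increasing u32. Equivalent sketch:
 */
static u32 next_ctx_id(atomic_t *ctr)
{
	return (u32)atomic_add_return(1, ctr) - 1;	/* old value, cast to u32 */
}
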
@@ -298,8 +276,7 @@ void kbase_context_common_term(struct kbase_context *kctx)
pages = atomic_read(&kctx->used_pages);
if (pages != 0)
- dev_warn(kctx->kbdev->dev,
- "%s: %d pages in use!\n", __func__, pages);
+ dev_warn(kctx->kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
@@ -328,9 +305,8 @@ void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
int kbase_context_mmu_init(struct kbase_context *kctx)
{
- return kbase_mmu_init(
- kctx->kbdev, &kctx->mmu, kctx,
- kbase_context_mmu_group_id_get(kctx->create_flags));
+ return kbase_mmu_init(kctx->kbdev, &kctx->mmu, kctx,
+ kbase_context_mmu_group_id_get(kctx->create_flags));
}
void kbase_context_mmu_term(struct kbase_context *kctx)
@@ -342,7 +318,7 @@ int kbase_context_mem_alloc_page(struct kbase_context *kctx)
{
struct page *p;
- p = kbase_mem_alloc_page(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK]);
+ p = kbase_mem_alloc_page(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK], false);
if (!p)
return -ENOMEM;
@@ -354,10 +330,8 @@ int kbase_context_mem_alloc_page(struct kbase_context *kctx)
void kbase_context_mem_pool_free(struct kbase_context *kctx)
{
/* drop the aliasing sink page now that it can't be mapped anymore */
- kbase_mem_pool_free(
- &kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
- as_page(kctx->aliasing_sink_page),
- false);
+ kbase_mem_pool_free(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
+ as_page(kctx->aliasing_sink_page), false);
}
void kbase_context_sticky_resource_term(struct kbase_context *kctx)
@@ -369,18 +343,15 @@ void kbase_context_sticky_resource_term(struct kbase_context *kctx)
/* free pending region setups */
pending_regions_to_clean = KBASE_COOKIE_MASK;
- bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean,
- kctx->cookies, BITS_PER_LONG);
+ bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean, kctx->cookies,
+ BITS_PER_LONG);
while (pending_regions_to_clean) {
- unsigned int cookie = find_first_bit(&pending_regions_to_clean,
- BITS_PER_LONG);
+ unsigned int cookie = find_first_bit(&pending_regions_to_clean, BITS_PER_LONG);
if (!WARN_ON(!kctx->pending_regions[cookie])) {
dev_dbg(kctx->kbdev->dev, "Freeing pending unmapped region\n");
- kbase_mem_phy_alloc_put(
- kctx->pending_regions[cookie]->cpu_alloc);
- kbase_mem_phy_alloc_put(
- kctx->pending_regions[cookie]->gpu_alloc);
+ kbase_mem_phy_alloc_put(kctx->pending_regions[cookie]->cpu_alloc);
+ kbase_mem_phy_alloc_put(kctx->pending_regions[cookie]->gpu_alloc);
kfree(kctx->pending_regions[cookie]);
kctx->pending_regions[cookie] = NULL;
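
/*
 * Aside: the cleanup computes "cookies still pending" as
 * KBASE_COOKIE_MASK with the free bits (kctx->cookies) masked out, then
 * peels off set bits one at a time. for_each_set_bit() expresses the
 * same walk; a hedged sketch (treating the bitmap as a single unsigned
 * long, as the code above does):
 */
unsigned long pending = KBASE_COOKIE_MASK & ~kctx->cookies[0];
unsigned int cookie;

for_each_set_bit(cookie, &pending, BITS_PER_LONG) {
	/* release kctx->pending_regions[cookie], as in the loop above */
}
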
@@ -390,3 +361,10 @@ void kbase_context_sticky_resource_term(struct kbase_context *kctx)
}
kbase_gpu_vm_unlock(kctx);
}
+
+bool kbase_ctx_compat_mode(struct kbase_context *kctx)
+{
+ return !IS_ENABLED(CONFIG_64BIT) ||
+ (IS_ENABLED(CONFIG_64BIT) && kbase_ctx_flag(kctx, KCTX_COMPAT));
+}
+KBASE_EXPORT_TEST_API(kbase_ctx_compat_mode);
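
/*
 * Aside on the new helper: !A || (A && B) reduces to !A || B, so the
 * second IS_ENABLED(CONFIG_64BIT) is redundant but harmless; either form
 * folds to a compile-time constant on 32-bit kernels and to the
 * KCTX_COMPAT flag test on 64-bit ones. A hedged usage sketch (the
 * address-width choice is illustrative, not taken from this patch):
 */
const unsigned int gpu_va_bits = kbase_ctx_compat_mode(kctx) ? 32 : 48;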