author     Jörg Wagner <jorwag@google.com>    2023-12-14 09:44:26 +0000
committer  Jörg Wagner <jorwag@google.com>    2023-12-14 09:44:26 +0000
commit     049a542207ed694271316782397b78b2e202086a (patch)
tree       105e9378d4d5062dc72109fdd4a77c915bd9425d /mali_kbase/context
parent     e61eb93296e9f940b32d4ad4b0c3a5557cbeaf17 (diff)
download   gpu-049a542207ed694271316782397b78b2e202086a.tar.gz
Update KMD to r47p0
Provenance: ipdelivery@ad01e50d640910a99224382bb227e6d4de627657
Change-Id: I19ac9bce34a5c5a319c1b4a388e8b037b3dfe6e7
Diffstat (limited to 'mali_kbase/context')
-rw-r--r--  mali_kbase/context/backend/mali_kbase_context_csf.c  39
-rw-r--r--  mali_kbase/context/backend/mali_kbase_context_jm.c   55
-rw-r--r--  mali_kbase/context/mali_kbase_context.c              82
-rw-r--r--  mali_kbase/context/mali_kbase_context.h              26
-rw-r--r--  mali_kbase/context/mali_kbase_context_internal.h     10
5 files changed, 85 insertions, 127 deletions
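
Note: several hunks below reflow the backends' context_init[] tables. Both backends drive context setup through an array of { init, term, err_mes } entries: entries run in order, and on a failed init the already-completed entries are unwound in reverse, skipping NULL terminators — which is what lets the new { kbase_ctx_sched_init_ctx, NULL, NULL } row work (though its NULL err_mes means a failure there would log a null message). A minimal, self-contained userspace sketch of that pattern (hypothetical names, not the driver code):

#include <stdio.h>

struct ctx { int unused; };                /* stand-in for struct kbase_context */

struct init_entry {
        int (*init)(struct ctx *c);
        void (*term)(struct ctx *c);
        const char *err_mes;
};

static int step_a_init(struct ctx *c) { (void)c; return 0; }
static void step_a_term(struct ctx *c) { (void)c; puts("step A undone"); }
static int step_b_init(struct ctx *c) { (void)c; return -12; } /* simulated failure */

static const struct init_entry entries[] = {
        { step_a_init, step_a_term, "step A failed" },
        { step_b_init, NULL, "step B failed" }, /* NULL term is legal, like the ctx_sched row */
};

static void term_partial(struct ctx *c, unsigned int i)
{
        while (i-- > 0)                    /* unwind in reverse declaration order */
                if (entries[i].term)
                        entries[i].term(c);
}

int main(void)
{
        struct ctx c = { 0 };

        for (unsigned int i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
                int err = entries[i].init(&c);

                if (err) {
                        printf("%s error = %d\n", entries[i].err_mes, err);
                        term_partial(&c, i); /* entries [0, i) were initialized */
                        return 1;
                }
        }
        return 0;
}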
diff --git a/mali_kbase/context/backend/mali_kbase_context_csf.c b/mali_kbase/context/backend/mali_kbase_context_csf.c
index 8b74de0..8b14108 100644
--- a/mali_kbase/context/backend/mali_kbase_context_csf.c
+++ b/mali_kbase/context/backend/mali_kbase_context_csf.c
@@ -24,12 +24,13 @@
*/
#include <context/mali_kbase_context_internal.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_mem_pool_group.h>
#include <mmu/mali_kbase_mmu.h>
#include <tl/mali_kbase_timeline.h>
+#include <mali_kbase_ctx_sched.h>
#if IS_ENABLED(CONFIG_DEBUG_FS)
#include <csf/mali_kbase_csf_csg_debugfs.h>
@@ -92,24 +93,20 @@ static const struct kbase_context_init context_init[] = {
"Memory pool group initialization failed" },
{ kbase_mem_evictable_init, kbase_mem_evictable_deinit,
"Memory evictable initialization failed" },
- { kbase_context_mmu_init, kbase_context_mmu_term,
- "MMU initialization failed" },
- { kbase_context_mem_alloc_page, kbase_context_mem_pool_free,
- "Memory alloc page failed" },
+ { kbase_ctx_sched_init_ctx, NULL, NULL },
+ { kbase_context_mmu_init, kbase_context_mmu_term, "MMU initialization failed" },
+ { kbase_context_mem_alloc_page, kbase_context_mem_pool_free, "Memory alloc page failed" },
{ kbase_region_tracker_init, kbase_region_tracker_term,
"Region tracker initialization failed" },
{ kbase_sticky_resource_init, kbase_context_sticky_resource_term,
"Sticky resource initialization failed" },
{ kbase_jit_init, kbase_jit_term, "JIT initialization failed" },
- { kbase_csf_ctx_init, kbase_csf_ctx_term,
- "CSF context initialization failed" },
+ { kbase_csf_ctx_init, kbase_csf_ctx_term, "CSF context initialization failed" },
{ kbase_context_add_to_dev_list, kbase_context_remove_from_dev_list,
"Adding kctx to device failed" },
};
-static void kbase_context_term_partial(
- struct kbase_context *kctx,
- unsigned int i)
+static void kbase_context_term_partial(struct kbase_context *kctx, unsigned int i)
{
while (i-- > 0) {
if (context_init[i].term)
@@ -117,11 +114,10 @@ static void kbase_context_term_partial(
}
}
-struct kbase_context *kbase_create_context(struct kbase_device *kbdev,
- bool is_compat,
- base_context_create_flags const flags,
- unsigned long const api_version,
- struct kbase_file *const kfile)
+struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+ base_context_create_flags const flags,
+ unsigned long const api_version,
+ struct kbase_file *const kfile)
{
struct kbase_context *kctx;
unsigned int i = 0;
@@ -159,8 +155,7 @@ struct kbase_context *kbase_create_context(struct kbase_device *kbdev,
err = context_init[i].init(kctx);
if (err) {
- dev_err(kbdev->dev, "%s error = %d\n",
- context_init[i].err_mes, err);
+ dev_err(kbdev->dev, "%s error = %d\n", context_init[i].err_mes, err);
/* kctx should be freed by kbase_context_free().
* Otherwise it will result in memory leak.
@@ -192,12 +187,10 @@ void kbase_destroy_context(struct kbase_context *kctx)
* Customer side that a hang could occur if context termination is
* not blocked until the resume of GPU device.
*/
- while (kbase_pm_context_active_handle_suspend(
- kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
- dev_info(kbdev->dev,
- "Suspend in progress when destroying context");
- wait_event(kbdev->pm.resume_wait,
- !kbase_pm_is_suspending(kbdev));
+ while (kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
+ dev_info(kbdev->dev, "Suspend in progress when destroying context");
+ wait_event(kbdev->pm.resume_wait, !kbase_pm_is_suspending(kbdev));
}
/* Have synchronized against the System suspend and incremented the
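
Note: the destroy-path hunk above keeps a retry loop around kbase_pm_context_active_handle_suspend(): if a system suspend is in flight, the thread sleeps on pm.resume_wait until kbase_pm_is_suspending() goes false, then retries, so context termination is blocked until the GPU resumes. A userspace condition-variable analogue of that block-until-resume shape (hypothetical names, only a sketch):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resume_wait = PTHREAD_COND_INITIALIZER;
static bool suspending;                    /* mirrors kbase_pm_is_suspending() */

/* Called by the resume path: clear the flag and wake all waiters. */
static void pm_resume(void)
{
        pthread_mutex_lock(&pm_lock);
        suspending = false;
        pthread_cond_broadcast(&resume_wait);
        pthread_mutex_unlock(&pm_lock);
}

/* Called before tearing a context down: block while a suspend is in flight. */
static void wait_until_resumed(void)
{
        pthread_mutex_lock(&pm_lock);
        while (suspending)                 /* wait_event(resume_wait, !suspending) */
                pthread_cond_wait(&resume_wait, &pm_lock);
        pthread_mutex_unlock(&pm_lock);
}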
diff --git a/mali_kbase/context/backend/mali_kbase_context_jm.c b/mali_kbase/context/backend/mali_kbase_context_jm.c
index 2a983fb..f2eefe9 100644
--- a/mali_kbase/context/backend/mali_kbase_context_jm.c
+++ b/mali_kbase/context/backend/mali_kbase_context_jm.c
@@ -24,7 +24,7 @@
*/
#include <context/mali_kbase_context_internal.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_kinstr_jm.h>
@@ -81,8 +81,7 @@ static void kbase_context_kbase_kinstr_jm_term(struct kbase_context *kctx)
static int kbase_context_kbase_timer_setup(struct kbase_context *kctx)
{
- kbase_timer_setup(&kctx->soft_job_timeout,
- kbasep_soft_job_timeout_worker);
+ kbase_timer_setup(&kctx->soft_job_timeout, kbasep_soft_job_timeout_worker);
return 0;
}
@@ -133,29 +132,23 @@ static const struct kbase_context_init context_init[] = {
"Memory pool group initialization failed" },
{ kbase_mem_evictable_init, kbase_mem_evictable_deinit,
"Memory evictable initialization failed" },
- { kbase_context_mmu_init, kbase_context_mmu_term,
- "MMU initialization failed" },
- { kbase_context_mem_alloc_page, kbase_context_mem_pool_free,
- "Memory alloc page failed" },
+ { kbase_ctx_sched_init_ctx, NULL, NULL },
+ { kbase_context_mmu_init, kbase_context_mmu_term, "MMU initialization failed" },
+ { kbase_context_mem_alloc_page, kbase_context_mem_pool_free, "Memory alloc page failed" },
{ kbase_region_tracker_init, kbase_region_tracker_term,
"Region tracker initialization failed" },
{ kbase_sticky_resource_init, kbase_context_sticky_resource_term,
"Sticky resource initialization failed" },
{ kbase_jit_init, kbase_jit_term, "JIT initialization failed" },
- { kbase_context_kbase_kinstr_jm_init,
- kbase_context_kbase_kinstr_jm_term,
+ { kbase_context_kbase_kinstr_jm_init, kbase_context_kbase_kinstr_jm_term,
"JM instrumentation initialization failed" },
- { kbase_context_kbase_timer_setup, NULL,
- "Timers initialization failed" },
- { kbase_event_init, kbase_event_cleanup,
- "Event initialization failed" },
- { kbasep_js_kctx_init, kbasep_js_kctx_term,
- "JS kctx initialization failed" },
+ { kbase_context_kbase_timer_setup, NULL, "Timers initialization failed" },
+ { kbase_event_init, kbase_event_cleanup, "Event initialization failed" },
+ { kbasep_js_kctx_init, kbasep_js_kctx_term, "JS kctx initialization failed" },
{ kbase_jd_init, kbase_jd_exit, "JD initialization failed" },
{ kbase_context_submit_check, NULL, "Enabling job submission failed" },
#if IS_ENABLED(CONFIG_DEBUG_FS)
- { kbase_debug_job_fault_context_init,
- kbase_debug_job_fault_context_term,
+ { kbase_debug_job_fault_context_init, kbase_debug_job_fault_context_term,
"Job fault context initialization failed" },
#endif
{ kbasep_platform_context_init, kbasep_platform_context_term,
@@ -165,9 +158,7 @@ static const struct kbase_context_init context_init[] = {
"Adding kctx to device failed" },
};
-static void kbase_context_term_partial(
- struct kbase_context *kctx,
- unsigned int i)
+static void kbase_context_term_partial(struct kbase_context *kctx, unsigned int i)
{
while (i-- > 0) {
if (context_init[i].term)
@@ -175,11 +166,10 @@ static void kbase_context_term_partial(
}
}
-struct kbase_context *kbase_create_context(struct kbase_device *kbdev,
- bool is_compat,
- base_context_create_flags const flags,
- unsigned long const api_version,
- struct kbase_file *const kfile)
+struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+ base_context_create_flags const flags,
+ unsigned long const api_version,
+ struct kbase_file *const kfile)
{
struct kbase_context *kctx;
unsigned int i = 0;
@@ -215,8 +205,7 @@ struct kbase_context *kbase_create_context(struct kbase_device *kbdev,
err = context_init[i].init(kctx);
if (err) {
- dev_err(kbdev->dev, "%s error = %d\n",
- context_init[i].err_mes, err);
+ dev_err(kbdev->dev, "%s error = %d\n", context_init[i].err_mes, err);
/* kctx should be freed by kbase_context_free().
* Otherwise it will result in memory leak.
@@ -243,7 +232,7 @@ void kbase_destroy_context(struct kbase_context *kctx)
if (WARN_ON(!kbdev))
return;
- /* Context termination could happen whilst the system suspend of
+ /* Context termination could happen whilst the system suspend of
* the GPU device is ongoing or has completed. It has been seen on
* Customer side that a hang could occur if context termination is
* not blocked until the resume of GPU device.
@@ -251,12 +240,10 @@ void kbase_destroy_context(struct kbase_context *kctx)
#ifdef CONFIG_MALI_ARBITER_SUPPORT
atomic_inc(&kbdev->pm.gpu_users_waiting);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
- while (kbase_pm_context_active_handle_suspend(
- kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
- dev_dbg(kbdev->dev,
- "Suspend in progress when destroying context");
- wait_event(kbdev->pm.resume_wait,
- !kbase_pm_is_suspending(kbdev));
+ while (kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
+ dev_dbg(kbdev->dev, "Suspend in progress when destroying context");
+ wait_event(kbdev->pm.resume_wait, !kbase_pm_is_suspending(kbdev));
}
/* Have synchronized against the System suspend and incremented the
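
Note: one JM-only hunk above reflows kbase_timer_setup(&kctx->soft_job_timeout, kbasep_soft_job_timeout_worker) onto one line. As far as this diff shows, kbase_timer_setup is a wrapper in the kernel timer_setup() style, where the callback receives the timer_list pointer and recovers its container with from_timer(). A hedged sketch of that API shape (assumed container and deadline, not the driver's code):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {                            /* hypothetical container */
        struct timer_list soft_job_timeout;
};

static void soft_job_timeout_worker(struct timer_list *t)
{
        struct my_ctx *ctx = from_timer(ctx, t, soft_job_timeout);

        (void)ctx;                         /* expire pending soft jobs here */
}

static void wire_soft_job_timer(struct my_ctx *ctx)
{
        timer_setup(&ctx->soft_job_timeout, soft_job_timeout_worker, 0);
        /* armed later, e.g. when a soft job gains a deadline: */
        mod_timer(&ctx->soft_job_timeout, jiffies + msecs_to_jiffies(3000));
}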
diff --git a/mali_kbase/context/mali_kbase_context.c b/mali_kbase/context/mali_kbase_context.c
index 0164e74..c9a0cb9 100644
--- a/mali_kbase/context/mali_kbase_context.c
+++ b/mali_kbase/context/mali_kbase_context.c
@@ -25,12 +25,16 @@
#include <linux/version.h>
#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
#include <linux/sched/task.h>
+#endif
+
+#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
#else
#include <linux/sched.h>
#endif
#include <mali_kbase.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem_pool_group.h>
@@ -53,8 +57,7 @@ static struct kbase_process *find_process_node(struct rb_node *node, pid_t tgid)
/* Check if the kctx creation request is from a existing process.*/
while (node) {
- struct kbase_process *prcs_node =
- rb_entry(node, struct kbase_process, kprcs_node);
+ struct kbase_process *prcs_node = rb_entry(node, struct kbase_process, kprcs_node);
if (prcs_node->tgid == tgid) {
kprcs = prcs_node;
break;
@@ -110,8 +113,7 @@ static int kbase_insert_kctx_to_process(struct kbase_context *kctx)
struct kbase_process *prcs_node;
parent = *new;
- prcs_node = rb_entry(parent, struct kbase_process,
- kprcs_node);
+ prcs_node = rb_entry(parent, struct kbase_process, kprcs_node);
if (tgid < prcs_node->tgid)
new = &(*new)->rb_left;
else
@@ -135,10 +137,6 @@ int kbase_context_common_init(struct kbase_context *kctx)
/* creating a context is considered a disjoint event */
kbase_disjoint_event(kctx->kbdev);
- kctx->process_mm = NULL;
- kctx->task = NULL;
- atomic_set(&kctx->nonmapped_pages, 0);
- atomic_set(&kctx->permanent_mapped_pages, 0);
kctx->tgid = current->tgid;
kctx->pid = current->pid;
@@ -147,7 +145,7 @@ int kbase_context_common_init(struct kbase_context *kctx)
struct pid *pid_struct;
rcu_read_lock();
- pid_struct = find_get_pid(kctx->tgid);
+ pid_struct = get_pid(task_tgid(current));
if (likely(pid_struct)) {
struct task_struct *task = pid_task(pid_struct, PIDTYPE_PID);
@@ -158,16 +156,14 @@ int kbase_context_common_init(struct kbase_context *kctx)
get_task_struct(task);
kctx->task = task;
} else {
- dev_err(kctx->kbdev->dev,
- "Failed to get task pointer for %s/%d",
+ dev_err(kctx->kbdev->dev, "Failed to get task pointer for %s/%d",
current->comm, current->pid);
err = -ESRCH;
}
put_pid(pid_struct);
} else {
- dev_err(kctx->kbdev->dev,
- "Failed to get pid pointer for %s/%d",
+ dev_err(kctx->kbdev->dev, "Failed to get pid pointer for %s/%d",
current->comm, current->pid);
err = -ESRCH;
}
@@ -180,8 +176,6 @@ int kbase_context_common_init(struct kbase_context *kctx)
kctx->process_mm = current->mm;
}
- atomic_set(&kctx->used_pages, 0);
-
mutex_init(&kctx->reg_lock);
spin_lock_init(&kctx->mem_partials_lock);
@@ -190,33 +184,17 @@ int kbase_context_common_init(struct kbase_context *kctx)
spin_lock_init(&kctx->waiting_soft_jobs_lock);
INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
- init_waitqueue_head(&kctx->event_queue);
- atomic_set(&kctx->event_count, 0);
-
-#if !MALI_USE_CSF
- atomic_set(&kctx->event_closed, false);
-#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
- atomic_set(&kctx->jctx.work_id, 0);
-#endif
-#endif
-
-#if MALI_USE_CSF
- atomic64_set(&kctx->num_fixable_allocs, 0);
- atomic64_set(&kctx->num_fixed_allocs, 0);
-#endif
-
kbase_gpu_vm_lock(kctx);
bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
kbase_gpu_vm_unlock(kctx);
- kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
+ kctx->id = (u32)atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
mutex_lock(&kctx->kbdev->kctx_list_lock);
err = kbase_insert_kctx_to_process(kctx);
mutex_unlock(&kctx->kbdev->kctx_list_lock);
if (err) {
- dev_err(kctx->kbdev->dev,
- "(err:%d) failed to insert kctx to kbase_process", err);
+ dev_err(kctx->kbdev->dev, "(err:%d) failed to insert kctx to kbase_process", err);
if (likely(kctx->kfile)) {
mmdrop(kctx->process_mm);
put_task_struct(kctx->task);
@@ -298,8 +276,7 @@ void kbase_context_common_term(struct kbase_context *kctx)
pages = atomic_read(&kctx->used_pages);
if (pages != 0)
- dev_warn(kctx->kbdev->dev,
- "%s: %d pages in use!\n", __func__, pages);
+ dev_warn(kctx->kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
@@ -328,9 +305,8 @@ void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
int kbase_context_mmu_init(struct kbase_context *kctx)
{
- return kbase_mmu_init(
- kctx->kbdev, &kctx->mmu, kctx,
- kbase_context_mmu_group_id_get(kctx->create_flags));
+ return kbase_mmu_init(kctx->kbdev, &kctx->mmu, kctx,
+ kbase_context_mmu_group_id_get(kctx->create_flags));
}
void kbase_context_mmu_term(struct kbase_context *kctx)
@@ -342,7 +318,7 @@ int kbase_context_mem_alloc_page(struct kbase_context *kctx)
{
struct page *p;
- p = kbase_mem_alloc_page(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK]);
+ p = kbase_mem_alloc_page(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK], false);
if (!p)
return -ENOMEM;
@@ -354,10 +330,8 @@ int kbase_context_mem_alloc_page(struct kbase_context *kctx)
void kbase_context_mem_pool_free(struct kbase_context *kctx)
{
/* drop the aliasing sink page now that it can't be mapped anymore */
- kbase_mem_pool_free(
- &kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
- as_page(kctx->aliasing_sink_page),
- false);
+ kbase_mem_pool_free(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
+ as_page(kctx->aliasing_sink_page), false);
}
void kbase_context_sticky_resource_term(struct kbase_context *kctx)
@@ -369,18 +343,15 @@ void kbase_context_sticky_resource_term(struct kbase_context *kctx)
/* free pending region setups */
pending_regions_to_clean = KBASE_COOKIE_MASK;
- bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean,
- kctx->cookies, BITS_PER_LONG);
+ bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean, kctx->cookies,
+ BITS_PER_LONG);
while (pending_regions_to_clean) {
- unsigned int cookie = find_first_bit(&pending_regions_to_clean,
- BITS_PER_LONG);
+ unsigned int cookie = find_first_bit(&pending_regions_to_clean, BITS_PER_LONG);
if (!WARN_ON(!kctx->pending_regions[cookie])) {
dev_dbg(kctx->kbdev->dev, "Freeing pending unmapped region\n");
- kbase_mem_phy_alloc_put(
- kctx->pending_regions[cookie]->cpu_alloc);
- kbase_mem_phy_alloc_put(
- kctx->pending_regions[cookie]->gpu_alloc);
+ kbase_mem_phy_alloc_put(kctx->pending_regions[cookie]->cpu_alloc);
+ kbase_mem_phy_alloc_put(kctx->pending_regions[cookie]->gpu_alloc);
kfree(kctx->pending_regions[cookie]);
kctx->pending_regions[cookie] = NULL;
@@ -390,3 +361,10 @@ void kbase_context_sticky_resource_term(struct kbase_context *kctx)
}
kbase_gpu_vm_unlock(kctx);
}
+
+bool kbase_ctx_compat_mode(struct kbase_context *kctx)
+{
+ return !IS_ENABLED(CONFIG_64BIT) ||
+ (IS_ENABLED(CONFIG_64BIT) && kbase_ctx_flag(kctx, KCTX_COMPAT));
+}
+KBASE_EXPORT_TEST_API(kbase_ctx_compat_mode);
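
Note: the kbase_ctx_compat_mode() body moved here from the header (see the header diff below) so it can be exported. By Boolean absorption, !A || (A && B) is equivalent to !A || B, so the second IS_ENABLED(CONFIG_64BIT) conjunct is redundant. A tiny self-contained check of that equivalence:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
        for (int i = 0; i < 4; i++) {
                bool a = i & 1;            /* IS_ENABLED(CONFIG_64BIT) */
                bool b = i & 2;            /* kbase_ctx_flag(kctx, KCTX_COMPAT) */

                assert((!a || (a && b)) == (!a || b));
        }
        return 0;
}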
diff --git a/mali_kbase/context/mali_kbase_context.h b/mali_kbase/context/mali_kbase_context.h
index 22cb00c..e2295d0 100644
--- a/mali_kbase/context/mali_kbase_context.h
+++ b/mali_kbase/context/mali_kbase_context.h
@@ -66,11 +66,10 @@ void kbase_context_debugfs_term(struct kbase_context *const kctx);
*
* Return: new kbase context or NULL on failure
*/
-struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat,
- base_context_create_flags const flags,
- unsigned long api_version,
- struct kbase_file *const kfile);
+struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+ base_context_create_flags const flags,
+ unsigned long api_version,
+ struct kbase_file *const kfile);
/**
* kbase_destroy_context - Destroy a kernel base context.
@@ -87,10 +86,9 @@ void kbase_destroy_context(struct kbase_context *kctx);
*
* Return: true if @flag is set on @kctx, false if not.
*/
-static inline bool kbase_ctx_flag(struct kbase_context *kctx,
- enum kbase_context_flags flag)
+static inline bool kbase_ctx_flag(struct kbase_context *kctx, enum kbase_context_flags flag)
{
- return atomic_read(&kctx->flags) & flag;
+ return atomic_read(&kctx->flags) & (int)flag;
}
/**
@@ -100,11 +98,7 @@ static inline bool kbase_ctx_flag(struct kbase_context *kctx,
*
* Return: True if needs to maintain compatibility, False otherwise.
*/
-static inline bool kbase_ctx_compat_mode(struct kbase_context *kctx)
-{
- return !IS_ENABLED(CONFIG_64BIT) ||
- (IS_ENABLED(CONFIG_64BIT) && kbase_ctx_flag(kctx, KCTX_COMPAT));
-}
+bool kbase_ctx_compat_mode(struct kbase_context *kctx);
/**
* kbase_ctx_flag_clear - Clear @flag on @kctx
@@ -117,8 +111,7 @@ static inline bool kbase_ctx_compat_mode(struct kbase_context *kctx)
* Some flags have locking requirements, check the documentation for the
* respective flags.
*/
-static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
- enum kbase_context_flags flag)
+static inline void kbase_ctx_flag_clear(struct kbase_context *kctx, enum kbase_context_flags flag)
{
atomic_andnot(flag, &kctx->flags);
}
@@ -134,8 +127,7 @@ static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
* Some flags have locking requirements, check the documentation for the
* respective flags.
*/
-static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
- enum kbase_context_flags flag)
+static inline void kbase_ctx_flag_set(struct kbase_context *kctx, enum kbase_context_flags flag)
{
atomic_or(flag, &kctx->flags);
}
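
Note: the header hunks above reflow the flag helpers: context flags live in a single atomic word, set with atomic_or(), cleared with atomic_andnot(), and tested with atomic_read() (now with an explicit (int) cast of the flag). A userspace C11 analogue of the same pattern (hypothetical flag names, only a sketch):

#include <stdatomic.h>
#include <stdbool.h>

enum ctx_flags { CTX_COMPAT = 1 << 0, CTX_DYING = 1 << 1 };   /* hypothetical */

static atomic_int ctx_flags_word;

static void flag_set(enum ctx_flags f)
{
        atomic_fetch_or(&ctx_flags_word, (int)f);             /* atomic_or()     */
}

static void flag_clear(enum ctx_flags f)
{
        atomic_fetch_and(&ctx_flags_word, ~(int)f);           /* atomic_andnot() */
}

static bool flag_test(enum ctx_flags f)
{
        return atomic_load(&ctx_flags_word) & (int)f;         /* atomic_read()   */
}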
diff --git a/mali_kbase/context/mali_kbase_context_internal.h b/mali_kbase/context/mali_kbase_context_internal.h
index 1cde739..8d9b394 100644
--- a/mali_kbase/context/mali_kbase_context_internal.h
+++ b/mali_kbase/context/mali_kbase_context_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -36,6 +36,14 @@ struct kbase_context_init {
char *err_mes;
};
+/**
+ * kbase_context_common_init() - Initialize kbase context
+ * @kctx: Pointer to the kbase context to be initialized.
+ *
+ * This function must be called only when a kbase context is instantiated.
+ *
+ * Return: 0 on success.
+ */
int kbase_context_common_init(struct kbase_context *kctx);
void kbase_context_common_term(struct kbase_context *kctx);
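
Note: the hunk above adds kernel-doc for kbase_context_common_init() but leaves its counterpart bare; the same convention applied to the terminator would look roughly like this (wording assumed, not from the source):

/**
 * kbase_context_common_term() - Terminate the common part of a kbase context
 * @kctx: Pointer to the kbase context to be terminated.
 *
 * Counterpart to kbase_context_common_init(); presumably releases the
 * references that function took.
 */
void kbase_context_common_term(struct kbase_context *kctx);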