Diffstat (limited to 'mali_kbase/mali_kbase_js.h')
-rw-r--r--  mali_kbase/mali_kbase_js.h  74
1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/mali_kbase/mali_kbase_js.h b/mali_kbase/mali_kbase_js.h
index 66b2132..8969222 100644
--- a/mali_kbase/mali_kbase_js.h
+++ b/mali_kbase/mali_kbase_js.h
@@ -27,6 +27,7 @@
#include "mali_kbase_js_defs.h"
#include "mali_kbase_js_policy.h"
+#include "mali_kbase_context.h"
#include "mali_kbase_defs.h"
#include "mali_kbase_debug.h"
@@ -151,8 +152,7 @@ void kbasep_js_kctx_term(struct kbase_context *kctx);
*
* The following locking conditions are made on the caller:
* - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
- * - it must \em not hold kbasep_js_device_data::runpool_irq::lock (as this will be
- * obtained internally)
+ * - it must \em not hold hwaccess_lock (as this will be obtained internally)
* - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
* obtained internally)
* - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
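
For reference, hwaccess_lock is the spinlock embedded in struct kbase_device that replaces the old runpool_irq lock, so "obtained internally" means the callee brackets its run-pool accesses roughly as below (a sketch only, mirroring the spin_lock_irqsave() usage visible further down in this diff):

    unsigned long flags;

    spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    /* ... touch run-pool / scheduler state only while this is held ... */
    spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
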
@@ -213,8 +213,8 @@ void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx
*
* The following locking conditions are made on the caller:
* - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
- * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, (as this will be
- * obtained internally)
+ * - it must \em not hold the hwaccess_lock, (as this will be obtained
+ * internally)
* - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
* obtained internally)
*
@@ -233,8 +233,7 @@ bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
* @note This function can safely be called from IRQ context.
*
* The following locking conditions are made on the caller:
- * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
- * it will be used internally.
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
*
* @return value != false if the retain succeeded, and the context will not be scheduled out.
* @return false if the retain failed (because the context is being/has been scheduled out).
@@ -248,7 +247,7 @@ bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_conte
* @note This function can safely be called from IRQ context.
*
* The following locks must be held by the caller:
- * - kbasep_js_device_data::runpool_irq::lock
+ * - hwaccess_lock
*
* @return value != false if the retain succeeded, and the context will not be scheduled out.
* @return false if the retain failed (because the context is being/has been scheduled out).
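
A caller of the _nolock variant is therefore expected to already hold the lock; a minimal sketch, assuming the usual kbdev/kctx pointers:

    unsigned long flags;
    bool retained;

    spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
    spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

    if (retained) {
            /* kctx will not be scheduled out until it is released again */
    }
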
@@ -266,9 +265,9 @@ bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbas
* @note This function can safely be called from IRQ context.
*
* The following locking conditions are made on the caller:
- * - it must \em not hold the kbasep_js_device_data::runpoool_irq::lock, because
- * it will be used internally. If the runpool_irq::lock is already held, then
- * the caller should use kbasep_js_runpool_lookup_ctx_nolock() instead.
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * If the hwaccess_lock is already held, then the caller should use
+ * kbasep_js_runpool_lookup_ctx_nolock() instead.
*
* @return a valid struct kbase_context on success, which has been refcounted as being busy.
* @return NULL on failure, indicating that no context was found in \a as_nr
@@ -288,7 +287,7 @@ struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, i
* Note: This function can safely be called from IRQ context.
*
* The following locking conditions are made on the caller:
- * - it must the kbasep_js_device_data::runpoool_irq::lock.
+ * - it must hold the hwaccess_lock
*
* Return: a valid struct kbase_context on success, which has been refcounted as
* being busy.
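
The two lookup variants differ only in who takes hwaccess_lock; a hedged sketch of choosing between them (as_nr is the address-space index described above):

    struct kbase_context *found;
    unsigned long flags;

    /* Lock not held: the plain lookup takes hwaccess_lock internally and
     * returns a context refcounted as busy. */
    found = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);

    /* Lock already held: use the _nolock variant instead. */
    spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    found = kbasep_js_runpool_lookup_ctx_nolock(kbdev, as_nr);
    spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
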
@@ -362,12 +361,12 @@ void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kb
* scheduled, or that already has a zero refcount.
*
* The following locking conditions are made on the caller:
- * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
- * it will be used internally.
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
* - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
* - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
* obtained internally)
- * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
* - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
* obtained internally)
*
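
Taken together with the retain calls above, the expected usage is a retain/release pair made with none of the listed locks held; a sketch only, assuming the usual (kbdev, kctx) signature for the release call:

    if (kbasep_js_runpool_retain_ctx(kbdev, kctx)) {
            /* kctx stays scheduled in while the refcount is held */
            /* ... access per-context run-pool state ... */
            kbasep_js_runpool_release_ctx(kbdev, kctx);
    }
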
@@ -411,11 +410,11 @@ void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
* kbasep_js_release_privileged_ctx is called).
*
* The following locking conditions are made on the caller:
- * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
- * it will be used internally.
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
* - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
* obtained internally)
- * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
* - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
* - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
* be used internally.
@@ -429,12 +428,12 @@ void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_
* See kbasep_js_runpool_release_ctx for potential side effects.
*
* The following locking conditions are made on the caller:
- * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
- * it will be used internally.
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
* - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
* - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
* obtained internally)
- * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
*
*/
void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
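
As the comment notes, the privileged scheduling is undone by kbasep_js_release_privileged_ctx(); a minimal sketch of the pairing:

    kbasep_js_schedule_privileged_ctx(kbdev, kctx);

    /* ... kctx is guaranteed to be scheduled in until released ... */

    kbasep_js_release_privileged_ctx(kbdev, kctx);
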
@@ -444,7 +443,7 @@ void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_c
*
* The following locks may be used:
* - kbasep_js_device_data::runpool_mutex
- * - kbasep_js_device_data::runpool_irq::lock
+ * - hwaccess_lock
*/
void kbase_js_try_run_jobs(struct kbase_device *kbdev);
@@ -564,8 +563,10 @@ bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
*
* @param[in] katom Pointer to the atom to complete
* @param[in] end_timestamp The time that the atom completed (may be NULL)
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
*/
-void kbase_js_complete_atom(struct kbase_jd_atom *katom,
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
ktime_t *end_timestamp);
/**
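
With the changed return type, a caller can pick up an atom that the completion has unblocked; a sketch only, since how the returned atom is resubmitted is left to the caller:

    struct kbase_jd_atom *next;

    next = kbase_js_complete_atom(katom, end_timestamp);
    if (next) {
            /* an atom previously blocked on katom can now be run */
    }
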
@@ -630,7 +631,7 @@ void kbase_js_set_timeouts(struct kbase_device *kbdev);
*
* As with any bool, never test the return value with true.
*
- * The caller must hold kbasep_js_device_data::runpool_irq::lock.
+ * The caller must hold hwaccess_lock.
*/
static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
@@ -638,7 +639,7 @@ static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_
/* Ensure context really is scheduled in */
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
test_bit = (u16) (1u << kctx->as_nr);
@@ -651,7 +652,7 @@ static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_
* The purpose of this abstraction is to hide the underlying data size, and wrap up
* the long repeated line of code.
*
- * The caller must hold kbasep_js_device_data::runpool_irq::lock.
+ * The caller must hold hwaccess_lock.
*/
static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
@@ -659,7 +660,7 @@ static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js
/* Ensure context really is scheduled in */
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
set_bit = (u16) (1u << kctx->as_nr);
@@ -674,7 +675,7 @@ static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js
* The purpose of this abstraction is to hide the underlying data size, and wrap up
* the long repeated line of code.
*
- * The caller must hold kbasep_js_device_data::runpool_irq::lock.
+ * The caller must hold hwaccess_lock.
*/
static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
@@ -683,7 +684,7 @@ static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *
/* Ensure context really is scheduled in */
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled);
+ KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
clear_bit = (u16) (1u << kctx->as_nr);
clear_mask = ~clear_bit;
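
All three submit-allowed helpers above share the same contract: the caller holds hwaccess_lock and passes &kbdev->js_data; a minimal sketch:

    unsigned long flags;

    spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    if (kbasep_js_is_submit_allowed(&kbdev->js_data, kctx))
            kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
    spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
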
@@ -792,7 +793,7 @@ static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_a
/**
* Debug Check the refcount of a context. Only use within ASSERTs
*
- * Obtains kbasep_js_device_data::runpool_irq::lock
+ * Obtains hwaccess_lock
*
* @return negative value if the context is not scheduled in
* @return current refcount of the context if it is scheduled in. The refcount
@@ -809,12 +810,12 @@ static inline int kbasep_js_debug_check_ctx_refcount(struct kbase_device *kbdev,
KBASE_DEBUG_ASSERT(kctx != NULL);
js_devdata = &kbdev->js_data;
- spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
as_nr = kctx->as_nr;
if (as_nr != KBASEP_AS_NR_INVALID)
result = js_devdata->runpool_irq.per_as_data[as_nr].as_busy_refcount;
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return result;
}
@@ -829,8 +830,7 @@ static inline int kbasep_js_debug_check_ctx_refcount(struct kbase_device *kbdev,
* when there is no ctx in \a as_nr (NULL returned).
*
* The following locking conditions are made on the caller:
- * - it must \em not hold the kbasep_js_device_data::runpoool_irq::lock, because
- * it will be used internally.
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
*
* @return a valid struct kbase_context on success, with a refcount that is guarenteed
* to be non-zero and unmodified by this function.
@@ -848,12 +848,12 @@ static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct
js_devdata = &kbdev->js_data;
js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
- spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
found_kctx = js_per_as_data->kctx;
KBASE_DEBUG_ASSERT(found_kctx == NULL || js_per_as_data->as_busy_refcount > 0);
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return found_kctx;
}
@@ -948,7 +948,7 @@ static inline void kbase_js_runpool_inc_context_count(
KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
++(js_devdata->nr_all_contexts_running);
- if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
+ if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
/* Track contexts that can submit jobs */
KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
S8_MAX);
@@ -981,7 +981,7 @@ static inline void kbase_js_runpool_dec_context_count(
--(js_devdata->nr_all_contexts_running);
KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
- if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
+ if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
/* Track contexts that can submit jobs */
--(js_devdata->nr_user_contexts_running);
KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
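
The kbase_ctx_flag() accessor used by the new assertions and checks comes from the mali_kbase_context.h include added at the top of this diff; a hedged sketch of its shape (the atomic flags field and the enum name are assumptions, not taken from this patch):

    static inline bool kbase_ctx_flag(struct kbase_context *kctx,
                                      enum kbase_context_flags flag)
    {
            /* assumption: per-context flags live in an atomic bitmask */
            return atomic_read(&kctx->flags) & flag;
    }
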