author     Jack Diver <diverj@google.com>  2022-09-02 11:38:04 +0000
committer  Jack Diver <diverj@google.com>  2022-09-02 14:33:02 +0000
commit     c30533582604fe0365bc3ce4e9e8e19dec3109da (patch)
tree       2dc4d074c820b535e9f18b8cd81d7e91bff042e5 /mali_kbase/jm
parent     88d7d984fed1c2a4358ce2bbc334e82d71e3a391 (diff)
download   gpu-c30533582604fe0365bc3ce4e9e8e19dec3109da.tar.gz
Mali Valhall Android DDK r38p1-01eac0
VX504X08X-BU-00000-r38p1-01eac0 - Valhall Android DDK
VX504X08X-BU-60000-r38p1-01eac0 - Valhall Android Document Bundle
VX504X08X-DC-11001-r38p1-01eac0 - Valhall Android DDK Software Errata
VX504X08X-SW-99006-r38p1-01eac0 - Valhall Android Renderscript AOSP parts

Signed-off-by: Jack Diver <diverj@google.com>
Change-Id: I242060ad8ddc14475bda657cbbbe6b6c26ecfd57
Diffstat (limited to 'mali_kbase/jm')
-rw-r--r--  mali_kbase/jm/mali_kbase_jm_defs.h  31
-rw-r--r--  mali_kbase/jm/mali_kbase_jm_js.h    78
-rw-r--r--  mali_kbase/jm/mali_kbase_js_defs.h   2
3 files changed, 53 insertions, 58 deletions
diff --git a/mali_kbase/jm/mali_kbase_jm_defs.h b/mali_kbase/jm/mali_kbase_jm_defs.h
index 13da5e3..da81981 100644
--- a/mali_kbase/jm/mali_kbase_jm_defs.h
+++ b/mali_kbase/jm/mali_kbase_jm_defs.h
@@ -186,8 +186,6 @@ struct kbase_jd_atom_dependency {
static inline const struct kbase_jd_atom *
kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
{
- KBASE_DEBUG_ASSERT(dep != NULL);
-
return (const struct kbase_jd_atom *)(dep->atom);
}
@@ -201,8 +199,6 @@ kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
static inline u8 kbase_jd_katom_dep_type(
const struct kbase_jd_atom_dependency *dep)
{
- KBASE_DEBUG_ASSERT(dep != NULL);
-
return dep->dep_type;
}
@@ -219,8 +215,6 @@ static inline void kbase_jd_katom_dep_set(
{
struct kbase_jd_atom_dependency *dep;
- KBASE_DEBUG_ASSERT(const_dep != NULL);
-
dep = (struct kbase_jd_atom_dependency *)const_dep;
dep->atom = a;
@@ -237,8 +231,6 @@ static inline void kbase_jd_katom_dep_clear(
{
struct kbase_jd_atom_dependency *dep;
- KBASE_DEBUG_ASSERT(const_dep != NULL);
-
dep = (struct kbase_jd_atom_dependency *)const_dep;
dep->atom = NULL;
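
With the KBASE_DEBUG_ASSERT(dep != NULL) checks gone from the four dependency accessors, a NULL slot pointer is no longer caught even in debug builds; callers are expected to pass the address of one of the atom's own dependency slots. A minimal sketch of that calling pattern, assuming the fixed-size dep[] array that struct kbase_jd_atom carries (the wrapper below is hypothetical, not part of this patch):

static const struct kbase_jd_atom *
example_dep_atom(const struct kbase_jd_atom *katom, unsigned int i)
{
	/* &katom->dep[i] cannot be NULL for a valid katom and in-range i,
	 * which is why the removed assert had nothing to catch here. */
	const struct kbase_jd_atom_dependency *dep = &katom->dep[i];

	return kbase_jd_katom_dep_atom(dep);
}
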
@@ -353,19 +345,6 @@ enum kbase_atom_exit_protected_state {
};
/**
- * struct kbase_ext_res - Contains the info for external resources referred
- * by an atom, which have been mapped on GPU side.
- * @gpu_address: Start address of the memory region allocated for
- * the resource from GPU virtual address space.
- * @alloc: pointer to physical pages tracking object, set on
- * mapping the external resource on GPU side.
- */
-struct kbase_ext_res {
- u64 gpu_address;
- struct kbase_mem_phy_alloc *alloc;
-};
-
-/**
* struct kbase_jd_atom - object representing the atom, containing the complete
* state and attributes of an atom.
* @work: work item for the bottom half processing of the atom,
@@ -398,7 +377,8 @@ struct kbase_ext_res {
* each allocation is read in order to enforce an
* overall physical memory usage limit.
* @nr_extres: number of external resources referenced by the atom.
- * @extres: pointer to the location containing info about
+ * @extres: Pointer to @nr_extres VA regions containing the external
+ * resource allocation and other information.
* @nr_extres external resources referenced by the atom.
* @device_nr: indicates the coregroup with which the atom is
* associated, when
@@ -508,7 +488,6 @@ struct kbase_ext_res {
* BASE_JD_REQ_START_RENDERPASS set in its core requirements
* with an atom that has BASE_JD_REQ_END_RENDERPASS set.
* @jc_fragment: Set of GPU fragment job chains
- * @retry_count: TODO: Not used,to be removed
*/
struct kbase_jd_atom {
struct work_struct work;
@@ -528,7 +507,7 @@ struct kbase_jd_atom {
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
u16 nr_extres;
- struct kbase_ext_res *extres;
+ struct kbase_va_region **extres;
u32 device_nr;
u64 jc;
@@ -619,8 +598,6 @@ struct kbase_jd_atom {
u32 atom_flags;
- int retry_count;
-
enum kbase_atom_gpu_rb_state gpu_rb_state;
bool need_cache_flush_cores_retained;
@@ -664,7 +641,7 @@ static inline bool kbase_jd_katom_is_protected(
}
/**
- * kbase_atom_is_younger - query if one atom is younger by age than another
+ * kbase_jd_atom_is_younger - query if one atom is younger by age than another
*
* @katom_a: the first atom
* @katom_b: the second atom
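
Since @extres is now an array of struct kbase_va_region pointers rather than the removed struct kbase_ext_res, per-resource information such as the GPU address has to come from the region itself. A minimal sketch of walking the new array (the helper and the use of start_pfn to recover the GPU VA are illustrative assumptions, not part of this patch):

static void example_walk_extres(struct kbase_jd_atom *katom)
{
	u16 i;

	for (i = 0; i < katom->nr_extres; i++) {
		struct kbase_va_region *reg = katom->extres[i];

		if (!reg)
			continue;
		/* kbase_ext_res::gpu_address is gone; the GPU VA is derived
		 * from the region's start page frame instead. */
		pr_debug("extres[%u]: gpu_va=0x%llx\n", i,
			 (unsigned long long)reg->start_pfn << PAGE_SHIFT);
	}
}
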
diff --git a/mali_kbase/jm/mali_kbase_jm_js.h b/mali_kbase/jm/mali_kbase_jm_js.h
index f01e8bb..d03bcc0 100644
--- a/mali_kbase/jm/mali_kbase_jm_js.h
+++ b/mali_kbase/jm/mali_kbase_jm_js.h
@@ -29,6 +29,8 @@
#include "mali_kbase_js_ctx_attr.h"
+#define JS_MAX_RUNNING_JOBS 8
+
/**
* kbasep_js_devdata_init - Initialize the Job Scheduler
* @kbdev: The kbase_device to operate on
@@ -618,7 +620,7 @@ bool kbase_js_atom_blocked_on_x_dep(struct kbase_jd_atom *katom);
void kbase_js_sched(struct kbase_device *kbdev, int js_mask);
/**
- * kbase_jd_zap_context - Attempt to deschedule a context that is being
+ * kbase_js_zap_context - Attempt to deschedule a context that is being
* destroyed
* @kctx: Context pointer
*
@@ -705,8 +707,10 @@ static inline bool kbasep_js_is_submit_allowed(
bool is_allowed;
/* Ensure context really is scheduled in */
- KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ if (WARN((kctx->as_nr == KBASEP_AS_NR_INVALID) || !kbase_ctx_flag(kctx, KCTX_SCHEDULED),
+ "%s: kctx %pK has assigned AS %d and context flag %d\n", __func__, (void *)kctx,
+ kctx->as_nr, atomic_read(&kctx->flags)))
+ return false;
test_bit = (u16) (1u << kctx->as_nr);
@@ -733,8 +737,10 @@ static inline void kbasep_js_set_submit_allowed(
u16 set_bit;
/* Ensure context really is scheduled in */
- KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ if (WARN((kctx->as_nr == KBASEP_AS_NR_INVALID) || !kbase_ctx_flag(kctx, KCTX_SCHEDULED),
+ "%s: kctx %pK has assigned AS %d and context flag %d\n", __func__, (void *)kctx,
+ kctx->as_nr, atomic_read(&kctx->flags)))
+ return;
set_bit = (u16) (1u << kctx->as_nr);
@@ -763,8 +769,10 @@ static inline void kbasep_js_clear_submit_allowed(
u16 clear_mask;
/* Ensure context really is scheduled in */
- KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+ if (WARN((kctx->as_nr == KBASEP_AS_NR_INVALID) || !kbase_ctx_flag(kctx, KCTX_SCHEDULED),
+ "%s: kctx %pK has assigned AS %d and context flag %d\n", __func__, (void *)kctx,
+ kctx->as_nr, atomic_read(&kctx->flags)))
+ return;
clear_bit = (u16) (1u << kctx->as_nr);
clear_mask = ~clear_bit;
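
All three submit-allowed helpers above now warn and bail out instead of asserting when the context is not actually scheduled in, so an unscheduled context is simply reported as unable to submit. A hedged sketch of a caller reacting to that (the function is illustrative only; locking requirements are omitted, see the helpers' kernel-doc):

static bool example_can_submit(struct kbase_device *kbdev, struct kbase_context *kctx)
{
	/* With the WARN()-based checks, an unscheduled context returns
	 * false here rather than tripping a debug-build assert. */
	if (!kbasep_js_is_submit_allowed(&kbdev->js_data, kctx))
		return false;

	return true;
}
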
@@ -798,7 +806,7 @@ static inline void kbasep_js_atom_retained_state_init_invalid(
* @retained_state: where to copy
* @katom: where to copy from
*
- * Copy atom state that can be made available after jd_done_nolock() is called
+ * Copy atom state that can be made available after kbase_jd_done_nolock() is called
* on that atom.
*/
static inline void kbasep_js_atom_retained_state_copy(
@@ -872,9 +880,6 @@ static inline void kbase_js_runpool_inc_context_count(
struct kbasep_js_device_data *js_devdata;
struct kbasep_js_kctx_info *js_kctx_info;
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(kctx != NULL);
-
js_devdata = &kbdev->js_data;
js_kctx_info = &kctx->jctx.sched_info;
@@ -882,13 +887,12 @@ static inline void kbase_js_runpool_inc_context_count(
lockdep_assert_held(&js_devdata->runpool_mutex);
/* Track total contexts */
- KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
+ WARN_ON_ONCE(js_devdata->nr_all_contexts_running >= JS_MAX_RUNNING_JOBS);
++(js_devdata->nr_all_contexts_running);
if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
/* Track contexts that can submit jobs */
- KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
- S8_MAX);
+ WARN_ON_ONCE(js_devdata->nr_user_contexts_running >= JS_MAX_RUNNING_JOBS);
++(js_devdata->nr_user_contexts_running);
}
}
@@ -909,9 +913,6 @@ static inline void kbase_js_runpool_dec_context_count(
struct kbasep_js_device_data *js_devdata;
struct kbasep_js_kctx_info *js_kctx_info;
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(kctx != NULL);
-
js_devdata = &kbdev->js_data;
js_kctx_info = &kctx->jctx.sched_info;
@@ -920,12 +921,12 @@ static inline void kbase_js_runpool_dec_context_count(
/* Track total contexts */
--(js_devdata->nr_all_contexts_running);
- KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
+ WARN_ON_ONCE(js_devdata->nr_all_contexts_running < 0);
if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
/* Track contexts that can submit jobs */
--(js_devdata->nr_user_contexts_running);
- KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
+ WARN_ON_ONCE(js_devdata->nr_user_contexts_running < 0);
}
}
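
The context-count tracking above swaps KBASE_DEBUG_ASSERT, which is compiled out unless kbase debug assertions are enabled, for WARN_ON_ONCE, which always evaluates its condition, logs one backtrace on the first violation, and lets execution continue; the upper bound is now JS_MAX_RUNNING_JOBS rather than S8_MAX. A standalone illustration of that difference (pure sketch, not driver code):

static void example_bound_check(s8 nr_running)
{
	/* Old style: only checked when kbase debug asserts are built in.
	 * KBASE_DEBUG_ASSERT(nr_running < S8_MAX);
	 *
	 * New style: always evaluated, warns once, then continues. */
	WARN_ON_ONCE(nr_running >= JS_MAX_RUNNING_JOBS);
}
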
@@ -950,8 +951,8 @@ extern const base_jd_prio
kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
/**
- * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
- * to relative ordering
+ * kbasep_js_atom_prio_to_sched_prio - Convert atom priority (base_jd_prio)
+ * to relative ordering.
* @atom_prio: Priority ID to translate.
*
* Atom priority values for @ref base_jd_prio cannot be compared directly to
@@ -980,16 +981,33 @@ static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
return kbasep_js_atom_priority_to_relative[atom_prio];
}
-static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
+/**
+ * kbasep_js_sched_prio_to_atom_prio - Convert relative scheduler priority
+ * to atom priority (base_jd_prio).
+ *
+ * @kbdev: Device pointer
+ * @sched_prio: Relative scheduler priority to translate.
+ *
+ * This function will convert relative scheduler priority back into base_jd_prio
+ * values. It takes values whose priorities are monotonically increasing
+ * and converts them to the corresponding base_jd_prio values. If an invalid number is
+ * passed in (i.e. not within the expected range) an error code is returned instead.
+ *
+ * The mapping is 1:1 and the size of the valid input range is the same as the
+ * size of the valid output range, i.e.
+ * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
+ *
+ * Return: On success: a value in the inclusive range
+ * 0..BASE_JD_NR_PRIO_LEVELS-1. On failure: BASE_JD_PRIO_INVALID.
+ */
+static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(struct kbase_device *kbdev,
+ int sched_prio)
{
- unsigned int prio_idx;
-
- KBASE_DEBUG_ASSERT(sched_prio >= 0 &&
- sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);
-
- prio_idx = (unsigned int)sched_prio;
-
- return kbasep_js_relative_priority_to_atom[prio_idx];
+ if (likely(sched_prio >= 0 && sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT))
+ return kbasep_js_relative_priority_to_atom[sched_prio];
+ /* Invalid priority value if reached here */
+ dev_warn(kbdev->dev, "Unknown JS scheduling priority %d", sched_prio);
+ return BASE_JD_PRIO_INVALID;
}
/**
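
kbasep_js_sched_prio_to_atom_prio() now takes the device pointer so it can dev_warn() on bad input, and it returns BASE_JD_PRIO_INVALID instead of asserting. A hedged sketch of a caller handling that sentinel (the surrounding function and the fallback to BASE_JD_PRIO_MEDIUM are illustrative assumptions):

static base_jd_prio example_to_atom_prio(struct kbase_device *kbdev, int sched_prio)
{
	base_jd_prio prio = kbasep_js_sched_prio_to_atom_prio(kbdev, sched_prio);

	/* Out-of-range scheduler priorities no longer assert; they come
	 * back as BASE_JD_PRIO_INVALID and can be defaulted here. */
	if (prio == BASE_JD_PRIO_INVALID)
		prio = BASE_JD_PRIO_MEDIUM;

	return prio;
}
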
diff --git a/mali_kbase/jm/mali_kbase_js_defs.h b/mali_kbase/jm/mali_kbase_js_defs.h
index 652f383..924a685 100644
--- a/mali_kbase/jm/mali_kbase_js_defs.h
+++ b/mali_kbase/jm/mali_kbase_js_defs.h
@@ -387,7 +387,7 @@ struct kbasep_js_kctx_info {
* @sched_priority: priority
* @device_nr: Core group atom was executed on
*
- * Subset of atom state that can be available after jd_done_nolock() is called
+ * Subset of atom state that can be available after kbase_jd_done_nolock() is called
* on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
* because the original atom could disappear.
*/