summaryrefslogtreecommitdiff
path: root/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
diff options
context:
space:
mode:
authorJack Diver <diverj@google.com>2022-09-02 11:38:04 +0000
committerJack Diver <diverj@google.com>2022-09-02 14:33:02 +0000
commitc30533582604fe0365bc3ce4e9e8e19dec3109da (patch)
tree2dc4d074c820b535e9f18b8cd81d7e91bff042e5 /mali_kbase/backend/gpu/mali_kbase_jm_rb.c
parent88d7d984fed1c2a4358ce2bbc334e82d71e3a391 (diff)
downloadgpu-c30533582604fe0365bc3ce4e9e8e19dec3109da.tar.gz
Mali Valhall Android DDK r38p1-01eac0
VX504X08X-BU-00000-r38p1-01eac0 - Valhall Android DDK VX504X08X-BU-60000-r38p1-01eac0 - Valhall Android Document Bundle VX504X08X-DC-11001-r38p1-01eac0 - Valhall Android DDK Software Errata VX504X08X-SW-99006-r38p1-01eac0 - Valhall Android Renderscript AOSP parts Signed-off-by: Jack Diver <diverj@google.com> Change-Id: I242060ad8ddc14475bda657cbbbe6b6c26ecfd57
Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_jm_rb.c')
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_jm_rb.c102
1 files changed, 50 insertions, 52 deletions
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
index e1a298b..9960beb 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_rb.c
@@ -347,16 +347,35 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
katom->protected_state.exit !=
KBASE_ATOM_EXIT_PROTECTED_CHECK)
kbdev->protected_mode_transition = false;
+
+ /* If the atom is at KBASE_ATOM_ENTER_PROTECTED_HWCNT state, it means
+ * one of two events prevented it from progressing to the next state and
+ * ultimately reaching protected mode:
+ * - hwcnts were enabled, and the atom had to schedule a worker to
+ * disable them.
+ * - the hwcnts were already disabled, but some other error occurred.
+ * In the first case, if the worker has not yet completed
+ * (kbdev->protected_mode_hwcnt_disabled == false), we need to re-enable
+ * them and signal to the worker that they have already been enabled.
+ */
+ if (kbase_jd_katom_is_protected(katom) &&
+ (katom->protected_state.enter == KBASE_ATOM_ENTER_PROTECTED_HWCNT)) {
+ kbdev->protected_mode_hwcnt_desired = true;
+ if (kbdev->protected_mode_hwcnt_disabled) {
+ kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+ kbdev->protected_mode_hwcnt_disabled = false;
+ }
+ }
+
/* If the atom has suspended hwcnt but has not yet entered
* protected mode, then resume hwcnt now. If the GPU is now in
* protected mode then hwcnt will be resumed by GPU reset so
* don't resume it here.
*/
if (kbase_jd_katom_is_protected(katom) &&
- ((katom->protected_state.enter ==
- KBASE_ATOM_ENTER_PROTECTED_IDLE_L2) ||
- (katom->protected_state.enter ==
- KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY))) {
+ ((katom->protected_state.enter == KBASE_ATOM_ENTER_PROTECTED_IDLE_L2) ||
+ (katom->protected_state.enter == KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY) ||
+ (katom->protected_state.enter == KBASE_ATOM_ENTER_PROTECTED_FINISHED))) {
WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
kbdev->protected_mode_hwcnt_desired = true;
if (kbdev->protected_mode_hwcnt_disabled) {
@@ -507,17 +526,14 @@ static int kbase_jm_protected_entry(struct kbase_device *kbdev,
KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev, kbdev);
if (err) {
/*
- * Failed to switch into protected mode, resume
- * GPU hwcnt and fail atom.
+ * Failed to switch into protected mode.
+ *
+ * At this point we expect:
+ * katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION &&
+ * katom->protected_state.enter = KBASE_ATOM_ENTER_PROTECTED_FINISHED
+ * ==>
+ * kbdev->protected_mode_hwcnt_disabled = false
*/
- WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
- kbdev->protected_mode_hwcnt_desired = true;
- if (kbdev->protected_mode_hwcnt_disabled) {
- kbase_hwcnt_context_enable(
- kbdev->hwcnt_gpu_ctx);
- kbdev->protected_mode_hwcnt_disabled = false;
- }
-
katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
/*
@@ -537,12 +553,9 @@ static int kbase_jm_protected_entry(struct kbase_device *kbdev,
/*
* Protected mode sanity checks.
*/
- KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom[idx]) ==
- kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom[idx]),
- kbase_gpu_in_protected_mode(kbdev));
+ WARN(kbase_jd_katom_is_protected(katom[idx]) != kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom[idx]), kbase_gpu_in_protected_mode(kbdev));
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_READY;
@@ -952,18 +965,6 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
cores_ready = kbase_pm_cores_requested(kbdev,
true);
- if (katom[idx]->event_code ==
- BASE_JD_EVENT_PM_EVENT) {
- KBASE_KTRACE_ADD_JM_SLOT_INFO(
- kbdev, JM_MARK_FOR_RETURN_TO_JS,
- katom[idx]->kctx, katom[idx],
- katom[idx]->jc, js,
- katom[idx]->event_code);
- katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_RETURN_TO_JS;
- break;
- }
-
if (!cores_ready)
break;
@@ -1012,9 +1013,10 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
kbase_pm_request_gpu_cycle_counter_l2_is_on(
kbdev);
- kbase_job_hw_submit(kbdev, katom[idx], js);
- katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_SUBMITTED;
+ if (!kbase_job_hw_submit(kbdev, katom[idx], js))
+ katom[idx]->gpu_rb_state = KBASE_ATOM_GPU_RB_SUBMITTED;
+ else
+ break;
/* ***TRANSITION TO HIGHER STATE*** */
fallthrough;
@@ -1349,11 +1351,9 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
} else {
char js_string[16];
- trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
- js_string,
- sizeof(js_string)),
- ktime_to_ns(ktime_get()), 0, 0,
- 0);
+ trace_gpu_sched_switch(kbasep_make_job_slot_string(js, js_string,
+ sizeof(js_string)),
+ ktime_to_ns(ktime_get_raw()), 0, 0, 0);
}
}
#endif
@@ -1409,14 +1409,14 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
if (katom->protected_state.exit ==
KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT) {
/* protected mode sanity checks */
- KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom), kbase_gpu_in_protected_mode(kbdev));
- KBASE_DEBUG_ASSERT_MSG(
- (kbase_jd_katom_is_protected(katom) && js == 0) ||
- !kbase_jd_katom_is_protected(katom),
- "Protected atom on JS%d not supported", js);
+ WARN(kbase_jd_katom_is_protected(katom) !=
+ kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom),
+ kbase_gpu_in_protected_mode(kbdev));
+ WARN(!(kbase_jd_katom_is_protected(katom) && js == 0) &&
+ kbase_jd_katom_is_protected(katom),
+ "Protected atom on JS%d not supported", js);
}
if ((katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED) &&
!kbase_ctx_flag(katom->kctx, KCTX_DYING))
@@ -1807,11 +1807,9 @@ void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
base_jd_core_req core_req)
{
if (!kbdev->pm.active_count) {
- mutex_lock(&kbdev->js_data.runpool_mutex);
- mutex_lock(&kbdev->pm.lock);
+ kbase_pm_lock(kbdev);
kbase_pm_update_active(kbdev);
- mutex_unlock(&kbdev->pm.lock);
- mutex_unlock(&kbdev->js_data.runpool_mutex);
+ kbase_pm_unlock(kbdev);
}
}