summary refs log tree commit diff
diff options
context:
space:
mode:
authorSuzanne Candanedo <suzanne.candanedo@arm.com>2023-12-23 06:56:05 +0000
committerMichael Stokes <mjstokes@google.com>2024-01-03 17:05:11 +0000
commitcfb55729953d62d99f66b0adc59963b189e9394b (patch)
tree1ade04332b8044de8169b599fbef7d4d514f8817
parent3b9d05436064ad14aa01f73d42e1c7408ab8b6b4 (diff)
downloadgpu-android-14.0.0_r0.66.tar.gz
This commit adds:
- In Bifrost JM GPUs, L2 cache is explicitly flushed before the power down of shader cores and cores are powered down before L2 is powered down.
- On older CSF GPUs, MCU will be halted before L2 is powered down and FW will flush the L2 on halt.

Bug: 316204986
Test: Local SST
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/6275
Signed-off-by: Michael Stokes <mjstokes@google.com>
Change-Id: Ia8ba830cbaab6d9739c96e2a4851eef5a33f236e
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_pm_backend.c4
-rw-r--r--mali_kbase/backend/gpu/mali_kbase_pm_defs.h3
-rw-r--r--mali_kbase/mali_base_hwconfig_issues.h54
-rw-r--r--mali_kbase/mmu/mali_kbase_mmu_hw_direct.c68
4 files changed, 120 insertions, 9 deletions
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index 46c5ffd..311ce90 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -223,6 +223,10 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_1997) &&
kbdev->pm.backend.callback_power_runtime_gpu_active &&
kbdev->pm.backend.callback_power_runtime_gpu_idle;
+
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa =
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TITANHW_2938) &&
+ kbdev->pm.backend.gpu_sleep_supported;
#endif
if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
index 66ca0b6..ad49019 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
@@ -347,6 +347,8 @@ struct kbase_pm_event_log {
* @callback_power_off_sc_rails: Callback invoked to turn off the shader core
* power rails. See &struct kbase_pm_callback_conf.
* @ca_cores_enabled: Cores that are currently available
+ * @apply_hw_issue_TITANHW_2938_wa: Indicates if the workaround for BASE_HW_ISSUE_TITANHW_2938
+ * needs to be applied when unmapping memory from GPU.
* @mcu_state: The current state of the micro-control unit, only applicable
* to GPUs that have such a component
* @l2_state: The current state of the L2 cache state machine. See
@@ -516,6 +518,7 @@ struct kbase_pm_backend_data {
u64 ca_cores_enabled;
#if MALI_USE_CSF
+ bool apply_hw_issue_TITANHW_2938_wa;
enum kbase_mcu_state mcu_state;
#endif
enum kbase_l2_core_state l2_state;
diff --git a/mali_kbase/mali_base_hwconfig_issues.h b/mali_kbase/mali_base_hwconfig_issues.h
index 91b9b83..003edda 100644
--- a/mali_kbase/mali_base_hwconfig_issues.h
+++ b/mali_kbase/mali_base_hwconfig_issues.h
@@ -67,6 +67,7 @@ enum base_hw_issue {
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -93,6 +94,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p0
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -115,6 +117,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p0
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -137,6 +140,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tMIx_r0p1
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -154,6 +158,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tMI
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -169,6 +174,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p0
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -184,6 +190,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p1
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -199,6 +206,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p2
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -213,6 +221,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tHEx_r0p3
BASE_HW_ISSUE_GPU2017_1336,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -225,6 +234,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tHE
BASE_HW_ISSUE_TSIX_2033,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -240,6 +250,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r0p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -255,6 +266,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r0p1
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -269,6 +281,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r1p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -282,6 +295,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tSIx_r1p1
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -294,6 +308,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tSI
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -307,6 +322,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tDVx_r0p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -319,6 +335,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tDV
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -333,6 +350,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNOx_r0p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -345,6 +363,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tNO
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -359,6 +378,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tGOx_r0p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -373,6 +393,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tGOx_r1p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -385,6 +406,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tGO
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -403,6 +425,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p0
BASE_HW_ISSUE_TTRX_3485,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -421,6 +444,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p1
BASE_HW_ISSUE_TTRX_3485,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -438,6 +462,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTRx_r0p2
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -452,6 +477,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tTR
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -470,6 +496,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNAx_r0p0
BASE_HW_ISSUE_TTRX_3485,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -487,6 +514,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tNAx_r0p1
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -501,6 +529,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tNA
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -517,6 +546,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r0p0
BASE_HW_ISSUE_TTRX_3485,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -532,6 +562,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r0p1
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -547,6 +578,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r1p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -562,6 +594,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBEx_r1p1
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -576,6 +609,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tBE
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -592,6 +626,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_lBEx_r1p0
BASE_HW_ISSUE_TTRX_3485,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -607,6 +642,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_lBEx_r1p1
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -622,6 +658,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r0p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -637,6 +674,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tBAx_r1p0
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -651,6 +689,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tBA
BASE_HW_ISSUE_TTRX_3464,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -662,6 +701,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tODx_r0p0
BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -673,6 +713,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tOD
BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -683,6 +724,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tGRx_r0p0
BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -693,6 +735,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tGR
BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -703,6 +746,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tVAx_r0p0
BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -713,6 +757,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tVA
BASE_HW_ISSUE_GPU2019_3901,
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -727,6 +772,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r0p0
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -741,6 +787,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r0p1
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -754,6 +801,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tTU
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -767,6 +815,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p0
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -780,6 +829,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p1
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -793,6 +843,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p2
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -806,6 +857,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTUx_r1p3
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -817,6 +869,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_model_tTI
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
@@ -828,6 +881,7 @@ __attribute__((unused)) static const enum base_hw_issue base_hw_issues_tTIx_r0p0
BASE_HW_ISSUE_TITANHW_2710,
BASE_HW_ISSUE_TITANHW_2679,
BASE_HW_ISSUE_GPU2022PRO_148,
+ BASE_HW_ISSUE_TITANHW_2938,
BASE_HW_ISSUE_END
};
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
index ca9f060..d5411bd 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
@@ -219,7 +219,40 @@ static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
return status;
}
-#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_NO_MALI)
+#if MALI_USE_CSF
+static int wait_l2_power_trans_complete(struct kbase_device *kbdev)
+{
+ const ktime_t wait_loop_start = ktime_get_raw();
+ const u32 pwr_trans_wait_time_ms = kbdev->mmu_or_gpu_cache_op_wait_time_ms;
+ s64 diff;
+ u64 value;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ do {
+ unsigned int i;
+
+ for (i = 0; i < 1000; i++) {
+ value = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_PWRTRANS_HI));
+ value <<= 32;
+ value |= kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_PWRTRANS_LO));
+
+ if (!value)
+ return 0;
+ }
+
+ diff = ktime_to_ms(ktime_sub(ktime_get_raw(), wait_loop_start));
+ } while (diff < pwr_trans_wait_time_ms);
+
+ dev_warn(kbdev->dev, "L2_PWRTRANS %016llx set for too long", value);
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
+ kbase_reset_gpu_locked(kbdev);
+
+ return -ETIMEDOUT;
+}
+
+#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
static int wait_cores_power_trans_complete(struct kbase_device *kbdev)
{
#define WAIT_TIMEOUT 50000 /* 50ms timeout */
@@ -308,7 +341,8 @@ static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_c
return ret;
}
-#endif
+#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
+#endif /* MALI_USE_CSF */
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
{
@@ -535,6 +569,7 @@ static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
int ret;
u64 lock_addr = 0x0;
u32 mmu_cmd = AS_COMMAND_FLUSH_MEM;
+ const enum kbase_mmu_op_type flush_op = op_param->op;
if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
return -EINVAL;
@@ -542,15 +577,14 @@ static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
/* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
* this point would be unexpected.
*/
- if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
- op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
+ if (flush_op != KBASE_MMU_OP_FLUSH_PT && flush_op != KBASE_MMU_OP_FLUSH_MEM) {
dev_err(kbdev->dev, "Unexpected flush operation received");
return -EINVAL;
}
lockdep_assert_held(&kbdev->mmu_hw_mutex);
- if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
+ if (flush_op == KBASE_MMU_OP_FLUSH_PT)
mmu_cmd = AS_COMMAND_FLUSH_PT;
/* Lock the region that needs to be updated */
@@ -589,9 +623,16 @@ static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
if (likely(!ret))
ret = wait_ready(kbdev, as->number);
- if (likely(!ret))
+ if (likely(!ret)) {
mmu_command_instr(kbdev, op_param->kctx_id, mmu_cmd, lock_addr,
op_param->mmu_sync_info);
+#if MALI_USE_CSF
+ if (flush_op == KBASE_MMU_OP_FLUSH_MEM &&
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa &&
+ kbdev->pm.backend.l2_state == KBASE_L2_PEND_OFF)
+ ret = wait_l2_power_trans_complete(kbdev);
+#endif
+ }
return ret;
}
@@ -615,6 +656,7 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
{
int ret, ret2;
u32 gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2_LSC;
+ const enum kbase_mmu_op_type flush_op = op_param->op;
if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
return -EINVAL;
@@ -622,8 +664,7 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
/* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
* this point would be unexpected.
*/
- if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
- op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
+ if (flush_op != KBASE_MMU_OP_FLUSH_PT && flush_op != KBASE_MMU_OP_FLUSH_MEM) {
dev_err(kbdev->dev, "Unexpected flush operation received");
return -EINVAL;
}
@@ -631,7 +672,7 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
lockdep_assert_held(&kbdev->hwaccess_lock);
lockdep_assert_held(&kbdev->mmu_hw_mutex);
- if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
+ if (flush_op == KBASE_MMU_OP_FLUSH_PT)
gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2;
/* 1. Issue MMU_AS_CONTROL.COMMAND.LOCK operation. */
@@ -645,6 +686,15 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
/* 3. Issue MMU_AS_CONTROL.COMMAND.UNLOCK operation. */
ret2 = kbase_mmu_hw_do_unlock_no_addr(kbdev, as, op_param);
+#if MALI_USE_CSF
+ if (!ret && !ret2) {
+ if (flush_op == KBASE_MMU_OP_FLUSH_MEM &&
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa &&
+ kbdev->pm.backend.l2_state == KBASE_L2_PEND_OFF)
+ ret = wait_l2_power_trans_complete(kbdev);
+ }
+#endif
+
return ret ?: ret2;
}