Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_time.c')
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_time.c | 278
1 file changed, 226 insertions(+), 52 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_time.c b/mali_kbase/backend/gpu/mali_kbase_time.c
index a83206a..28365c0 100644
--- a/mali_kbase/backend/gpu/mali_kbase_time.c
+++ b/mali_kbase/backend/gpu/mali_kbase_time.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2014-2016, 2018-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,9 +21,47 @@
#include <mali_kbase.h>
#include <mali_kbase_hwaccess_time.h>
+#if MALI_USE_CSF
+#include <asm/arch_timer.h>
+#include <linux/gcd.h>
+#include <csf/mali_kbase_csf_timeout.h>
+#endif
#include <device/mali_kbase_device.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <mali_kbase_config_defaults.h>
+#include <linux/version_compat_defs.h>
+
+struct kbase_timeout_info {
+ char *selector_str;
+ u64 timeout_cycles;
+};
+
+#if MALI_USE_CSF
+static struct kbase_timeout_info timeout_info[KBASE_TIMEOUT_SELECTOR_COUNT] = {
+ [CSF_FIRMWARE_TIMEOUT] = { "CSF_FIRMWARE_TIMEOUT", MIN(CSF_FIRMWARE_TIMEOUT_CYCLES,
+ CSF_FIRMWARE_PING_TIMEOUT_CYCLES) },
+ [CSF_PM_TIMEOUT] = { "CSF_PM_TIMEOUT", CSF_PM_TIMEOUT_CYCLES },
+ [CSF_GPU_RESET_TIMEOUT] = { "CSF_GPU_RESET_TIMEOUT", CSF_GPU_RESET_TIMEOUT_CYCLES },
+ [CSF_CSG_SUSPEND_TIMEOUT] = { "CSF_CSG_SUSPEND_TIMEOUT", CSF_CSG_SUSPEND_TIMEOUT_CYCLES },
+ [CSF_FIRMWARE_BOOT_TIMEOUT] = { "CSF_FIRMWARE_BOOT_TIMEOUT",
+ CSF_FIRMWARE_BOOT_TIMEOUT_CYCLES },
+ [CSF_FIRMWARE_PING_TIMEOUT] = { "CSF_FIRMWARE_PING_TIMEOUT",
+ CSF_FIRMWARE_PING_TIMEOUT_CYCLES },
+ [CSF_SCHED_PROTM_PROGRESS_TIMEOUT] = { "CSF_SCHED_PROTM_PROGRESS_TIMEOUT",
+ DEFAULT_PROGRESS_TIMEOUT_CYCLES },
+ [MMU_AS_INACTIVE_WAIT_TIMEOUT] = { "MMU_AS_INACTIVE_WAIT_TIMEOUT",
+ MMU_AS_INACTIVE_WAIT_TIMEOUT_CYCLES },
+ [KCPU_FENCE_SIGNAL_TIMEOUT] = { "KCPU_FENCE_SIGNAL_TIMEOUT",
+ KCPU_FENCE_SIGNAL_TIMEOUT_CYCLES },
+};
+#else
+static struct kbase_timeout_info timeout_info[KBASE_TIMEOUT_SELECTOR_COUNT] = {
+ [MMU_AS_INACTIVE_WAIT_TIMEOUT] = { "MMU_AS_INACTIVE_WAIT_TIMEOUT",
+ MMU_AS_INACTIVE_WAIT_TIMEOUT_CYCLES },
+ [JM_DEFAULT_JS_FREE_TIMEOUT] = { "JM_DEFAULT_JS_FREE_TIMEOUT",
+ JM_DEFAULT_JS_FREE_TIMEOUT_CYCLES },
+};
+#endif
void kbase_backend_get_gpu_time_norequest(struct kbase_device *kbdev,
u64 *cycle_counter,
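
For orientation: the timeout_info tables above express every timeout as a GPU cycle count, and the code further down converts cycles to milliseconds by dividing by the lowest GPU frequency in kHz (cycles / kHz = ms). A minimal userspace sketch of that arithmetic, with made-up cycle and frequency values standing in for the kernel macros:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: a 100M-cycle timeout at a lowest GPU
	 * frequency of 100 MHz (100000 kHz).
	 */
	uint64_t timeout_cycles = 100000000ULL;
	uint64_t freq_khz = 100000ULL;

	/* Dividing cycles by a frequency in kHz yields milliseconds,
	 * mirroring the div_u64(final_cycles, freq_khz) in this diff.
	 */
	uint64_t timeout_ms = timeout_cycles / freq_khz;

	printf("timeout = %llu ms\n", (unsigned long long)timeout_ms); /* 1000 */
	return 0;
}

When lowest_gpu_freq_khz is still zero at scaling time, the driver falls back to DEFAULT_REF_TIMEOUT_FREQ_KHZ, as kbase_device_get_scaling_frequency() below shows.
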
@@ -103,72 +141,132 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
#endif
}
-unsigned int kbase_get_timeout_ms(struct kbase_device *kbdev,
- enum kbase_timeout_selector selector)
+static u64 kbase_device_get_scaling_frequency(struct kbase_device *kbdev)
{
+ u64 freq_khz = kbdev->lowest_gpu_freq_khz;
+
+ if (!freq_khz) {
+ dev_dbg(kbdev->dev,
+ "Lowest frequency uninitialized! Using reference frequency for scaling");
+ return DEFAULT_REF_TIMEOUT_FREQ_KHZ;
+ }
+
+ return freq_khz;
+}
+
+void kbase_device_set_timeout_ms(struct kbase_device *kbdev, enum kbase_timeout_selector selector,
+ unsigned int timeout_ms)
+{
+ char *selector_str;
+
+ if (unlikely(selector >= KBASE_TIMEOUT_SELECTOR_COUNT)) {
+ selector = KBASE_DEFAULT_TIMEOUT;
+ selector_str = timeout_info[selector].selector_str;
+ dev_warn(kbdev->dev,
+ "Unknown timeout selector passed, falling back to default: %s\n",
+ timeout_info[selector].selector_str);
+ }
+ selector_str = timeout_info[selector].selector_str;
+
+ kbdev->backend_time.device_scaled_timeouts[selector] = timeout_ms;
+ dev_dbg(kbdev->dev, "\t%-35s: %ums\n", selector_str, timeout_ms);
+}
+
+void kbase_device_set_timeout(struct kbase_device *kbdev, enum kbase_timeout_selector selector,
+ u64 timeout_cycles, u32 cycle_multiplier)
+{
+ u64 final_cycles;
+ u64 timeout;
+ u64 freq_khz = kbase_device_get_scaling_frequency(kbdev);
+
+ if (unlikely(selector >= KBASE_TIMEOUT_SELECTOR_COUNT)) {
+ selector = KBASE_DEFAULT_TIMEOUT;
+ dev_warn(kbdev->dev,
+ "Unknown timeout selector passed, falling back to default: %s\n",
+ timeout_info[selector].selector_str);
+ }
+
+ /* If the multiplication overflows, we will have unsigned wrap-around, and so might
+ * end up with a shorter timeout. In those cases, we then want to have the largest
+ * timeout possible that will not run into these issues. Note that this will not
+ * wait for U64_MAX/frequency ms, as it will be clamped to a max of UINT_MAX
+ * milliseconds by subsequent steps.
+ */
+ if (check_mul_overflow(timeout_cycles, (u64)cycle_multiplier, &final_cycles))
+ final_cycles = U64_MAX;
+
/* Timeout calculation:
* dividing number of cycles by freq in KHz automatically gives value
* in milliseconds. nr_cycles will have to be multiplied by 1e3 to
* get result in microseconds, and 1e6 to get result in nanoseconds.
*/
+ timeout = div_u64(final_cycles, freq_khz);
- u64 timeout, nr_cycles = 0;
- /* Default value to mean 'no cap' */
- u64 timeout_cap = U64_MAX;
- u64 freq_khz = kbdev->lowest_gpu_freq_khz;
- /* Only for debug messages, safe default in case it's mis-maintained */
- const char *selector_str = "(unknown)";
+ if (unlikely(timeout > UINT_MAX)) {
+ dev_dbg(kbdev->dev,
+ "Capping excessive timeout %llums for %s at freq %llukHz to UINT_MAX ms",
+ timeout, timeout_info[selector].selector_str,
+ kbase_device_get_scaling_frequency(kbdev));
+ timeout = UINT_MAX;
+ }
- WARN_ON(!freq_khz);
+ kbase_device_set_timeout_ms(kbdev, selector, (unsigned int)timeout);
+}
- switch (selector) {
- case KBASE_TIMEOUT_SELECTOR_COUNT:
- default:
-#if !MALI_USE_CSF
- WARN(1, "Invalid timeout selector used! Using default value");
- nr_cycles = JM_DEFAULT_TIMEOUT_CYCLES;
- break;
-#else
- /* Use Firmware timeout if invalid selection */
- WARN(1,
- "Invalid timeout selector used! Using CSF Firmware timeout");
- fallthrough;
- case CSF_FIRMWARE_TIMEOUT:
- selector_str = "CSF_FIRMWARE_TIMEOUT";
- nr_cycles = CSF_FIRMWARE_TIMEOUT_CYCLES;
- /* Setup a cap on CSF FW timeout to FIRMWARE_PING_INTERVAL_MS,
- * if calculated timeout exceeds it. This should be adapted to
- * a direct timeout comparison once the
- * FIRMWARE_PING_INTERVAL_MS option is added to this timeout
- * function. A compile-time check such as BUILD_BUG_ON can also
- * be done once the firmware ping interval in cycles becomes
- * available as a macro.
+/**
+ * kbase_timeout_scaling_init - Initialize the table of scaled timeout
+ * values associated with a @kbase_device.
+ *
+ * @kbdev: KBase device pointer.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int kbase_timeout_scaling_init(struct kbase_device *kbdev)
+{
+ int err;
+ enum kbase_timeout_selector selector;
+
+ /* First, we initialize the minimum and maximum device frequencies, which
+ * are used to compute the timeouts.
+ */
+ err = kbase_pm_gpu_freq_init(kbdev);
+ if (unlikely(err < 0)) {
+ dev_dbg(kbdev->dev, "Could not initialize GPU frequency\n");
+ return err;
+ }
+
+ dev_dbg(kbdev->dev, "Scaling kbase timeouts:\n");
+ for (selector = 0; selector < KBASE_TIMEOUT_SELECTOR_COUNT; selector++) {
+ u32 cycle_multiplier = 1;
+ u64 nr_cycles = timeout_info[selector].timeout_cycles;
+#if MALI_USE_CSF
+ /* Special case: the scheduler progress timeout can be set manually,
+ * and does not have a canonical length defined in the headers. Hence,
+ * we query it once upon startup to get a baseline, and change it upon
+	 * every invocation of the appropriate functions.
*/
- timeout_cap = FIRMWARE_PING_INTERVAL_MS;
- break;
- case CSF_PM_TIMEOUT:
- selector_str = "CSF_PM_TIMEOUT";
- nr_cycles = CSF_PM_TIMEOUT_CYCLES;
- break;
- case CSF_GPU_RESET_TIMEOUT:
- selector_str = "CSF_GPU_RESET_TIMEOUT";
- nr_cycles = CSF_GPU_RESET_TIMEOUT_CYCLES;
- break;
+ if (selector == CSF_SCHED_PROTM_PROGRESS_TIMEOUT)
+ nr_cycles = kbase_csf_timeout_get(kbdev);
#endif
+
+ /* Since we are in control of the iteration bounds for the selector,
+ * we don't have to worry about bounds checking when setting the timeout.
+ */
+ kbase_device_set_timeout(kbdev, selector, nr_cycles, cycle_multiplier);
}
+ return 0;
+}
- timeout = div_u64(nr_cycles, freq_khz);
- if (timeout > timeout_cap) {
- dev_dbg(kbdev->dev, "Capped %s %llu to %llu", selector_str,
- (unsigned long long)timeout, (unsigned long long)timeout_cap);
- timeout = timeout_cap;
+unsigned int kbase_get_timeout_ms(struct kbase_device *kbdev, enum kbase_timeout_selector selector)
+{
+ if (unlikely(selector >= KBASE_TIMEOUT_SELECTOR_COUNT)) {
+ dev_warn(kbdev->dev, "Querying wrong selector, falling back to default\n");
+ selector = KBASE_DEFAULT_TIMEOUT;
}
- if (WARN(timeout > UINT_MAX,
- "Capping excessive timeout %llums for %s at freq %llukHz to UINT_MAX ms",
- (unsigned long long)timeout, selector_str, (unsigned long long)freq_khz))
- timeout = UINT_MAX;
- return (unsigned int)timeout;
+
+ return kbdev->backend_time.device_scaled_timeouts[selector];
}
+KBASE_EXPORT_TEST_API(kbase_get_timeout_ms);
u64 kbase_backend_get_cycle_cnt(struct kbase_device *kbdev)
{
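
The check_mul_overflow() clamp in kbase_device_set_timeout() above deserves a worked example: if timeout_cycles * cycle_multiplier wrapped around, the product would look like a much shorter timeout, so the code saturates at U64_MAX and lets the later UINT_MAX clamp take over. A standalone sketch of the same pattern, using __builtin_mul_overflow() (the GCC/Clang builtin that modern kernels' check_mul_overflow() maps to):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs chosen so the 64-bit product wraps. */
	uint64_t timeout_cycles = UINT64_MAX / 2 + 1;
	uint64_t cycle_multiplier = 4;
	uint64_t final_cycles;

	/* Saturate on overflow instead of silently wrapping, as the
	 * driver does before dividing by the frequency.
	 */
	if (__builtin_mul_overflow(timeout_cycles, cycle_multiplier,
				   &final_cycles))
		final_cycles = UINT64_MAX;

	printf("final_cycles = %llu\n", (unsigned long long)final_cycles);
	return 0;
}

After scaling, callers no longer recompute anything: kbase_get_timeout_ms() simply returns the cached entry from device_scaled_timeouts[].
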
@@ -186,3 +284,79 @@ u64 kbase_backend_get_cycle_cnt(struct kbase_device *kbdev)
return lo | (((u64) hi1) << 32);
}
+
+#if MALI_USE_CSF
+u64 __maybe_unused kbase_backend_time_convert_gpu_to_cpu(struct kbase_device *kbdev, u64 gpu_ts)
+{
+ if (WARN_ON(!kbdev))
+ return 0;
+
+ return div64_u64(gpu_ts * kbdev->backend_time.multiplier, kbdev->backend_time.divisor) +
+ kbdev->backend_time.offset;
+}
+
+/**
+ * get_cpu_gpu_time() - Get current CPU and GPU timestamps.
+ *
+ * @kbdev: Kbase device.
+ * @cpu_ts: Output CPU timestamp.
+ * @gpu_ts: Output GPU timestamp.
+ * @gpu_cycle: Output GPU cycle counts.
+ */
+static void get_cpu_gpu_time(struct kbase_device *kbdev, u64 *cpu_ts, u64 *gpu_ts, u64 *gpu_cycle)
+{
+ struct timespec64 ts;
+
+ kbase_backend_get_gpu_time(kbdev, gpu_cycle, gpu_ts, &ts);
+
+ if (cpu_ts)
+ *cpu_ts = ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+#endif
+
+int kbase_backend_time_init(struct kbase_device *kbdev)
+{
+ int err = 0;
+#if MALI_USE_CSF
+ u64 cpu_ts = 0;
+ u64 gpu_ts = 0;
+ u64 freq;
+ u64 common_factor;
+
+ kbase_pm_register_access_enable(kbdev);
+ get_cpu_gpu_time(kbdev, &cpu_ts, &gpu_ts, NULL);
+ freq = arch_timer_get_cntfrq();
+
+ if (!freq) {
+ dev_warn(kbdev->dev, "arch_timer_get_rate() is zero!");
+ err = -EINVAL;
+ goto disable_registers;
+ }
+
+ common_factor = gcd(NSEC_PER_SEC, freq);
+
+ kbdev->backend_time.multiplier = div64_u64(NSEC_PER_SEC, common_factor);
+ kbdev->backend_time.divisor = div64_u64(freq, common_factor);
+
+ if (!kbdev->backend_time.divisor) {
+ dev_warn(kbdev->dev, "CPU to GPU divisor is zero!");
+ err = -EINVAL;
+ goto disable_registers;
+ }
+
+ kbdev->backend_time.offset = cpu_ts - div64_u64(gpu_ts * kbdev->backend_time.multiplier,
+ kbdev->backend_time.divisor);
+#endif
+
+ if (kbase_timeout_scaling_init(kbdev)) {
+ dev_warn(kbdev->dev, "Could not initialize timeout scaling");
+ err = -EINVAL;
+ }
+
+#if MALI_USE_CSF
+disable_registers:
+ kbase_pm_register_access_disable(kbdev);
+#endif
+
+ return err;
+}
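
To make the CSF timestamp math above concrete: kbase_backend_time_init() reduces the NSEC_PER_SEC/freq ratio by their gcd so that kbase_backend_time_convert_gpu_to_cpu() can scale with one multiply and one divide. With a 19.2 MHz arch timer (an example rate, not one the diff fixes), gcd(1000000000, 19200000) = 1600000, giving multiplier = 625 and divisor = 12, i.e. about 52.08 ns per tick. A userspace sketch of the same reduction:

#include <stdint.h>
#include <stdio.h>

/* Euclid's algorithm, standing in for the kernel's gcd(). */
static uint64_t gcd_u64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;
	uint64_t freq = 19200000ULL; /* example arch-timer rate in Hz */

	uint64_t common = gcd_u64(nsec_per_sec, freq);
	uint64_t multiplier = nsec_per_sec / common; /* 625 */
	uint64_t divisor = freq / common;            /* 12  */

	/* One GPU timer tick corresponds to multiplier/divisor ns, so a
	 * CPU timestamp is gpu_ts * multiplier / divisor + offset.
	 */
	printf("ns = gpu_ts * %llu / %llu + offset\n",
	       (unsigned long long)multiplier, (unsigned long long)divisor);
	return 0;
}

The offset captured at init (cpu_ts minus the scaled gpu_ts) anchors the two clocks, so every later conversion is just the multiply, divide, and add seen in kbase_backend_time_convert_gpu_to_cpu().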