author    Vamsidhar reddy Gaddam <gvamsi@google.com>  2024-02-06 11:45:42 +0000
committer Vamsidhar reddy Gaddam <gvamsi@google.com>  2024-02-07 17:24:10 +0000
commit    0781d08e1eb2fce0dc82582a41ecd183d75fad71 (patch)
tree      ba227cefdd74ebdb680ec0a81e94fdb5d5f7cdb4
parent    88eb15d9c46c4992d178ce83216f97b8fb939cfe (diff)
Remove IFPO from KMD
The IFPO feature has been unused and only creates overhead for smooth
driver updates.

Bug: 324019470
Change-Id: Iab65e95b9ba21d74cc158557e3fa78d4478c9a71
Signed-off-by: Vamsidhar reddy Gaddam <gvamsi@google.com>
-rw-r--r--  mali_kbase/Kbuild                                                  5
-rw-r--r--  mali_kbase/Kconfig                                                 9
-rw-r--r--  mali_kbase/Makefile                                                2
-rw-r--r--  mali_kbase/Mconfig                                                10
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_backend.c                    10
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_defs.h                       13
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c                     49
-rw-r--r--  mali_kbase/build.bp                                                3
-rw-r--r--  mali_kbase/csf/mali_kbase_csf.c                                   15
-rw-r--r--  mali_kbase/csf/mali_kbase_csf.h                                    1
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_defs.h                              25
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_firmware.c                          96
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_firmware_cfg.c                      34
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_firmware_cfg.h                      16
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_firmware_no_mali.c                  53
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_scheduler.c                        666
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_scheduler.h                         13
-rw-r--r--  mali_kbase/debug/backend/mali_kbase_debug_ktrace_codes_csf.h      8
-rw-r--r--  mali_kbase/debug/backend/mali_kbase_debug_linux_ktrace_csf.h      9
-rw-r--r--  mali_kbase/debug/mali_kbase_debug_ktrace_codes.h                   4
-rw-r--r--  mali_kbase/debug/mali_kbase_debug_linux_ktrace.h                   4
-rw-r--r--  mali_kbase/device/backend/mali_kbase_device_hw_csf.c              38
-rw-r--r--  mali_kbase/mali_kbase_config.h                                    18
-rw-r--r--  mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c            16
-rw-r--r--  mali_kbase/platform/meson/mali_kbase_runtime_pm.c                 17
-rw-r--r--  mali_kbase/platform/pixel/mali_kbase_config_platform.h             3
-rw-r--r--  mali_kbase/platform/pixel/pixel_gpu.c                             36
-rw-r--r--  mali_kbase/platform/pixel/pixel_gpu_power.c                       94
-rw-r--r--  mali_kbase/platform/pixel/pixel_gpu_sysfs.c                       54

29 files changed, 37 insertions(+), 1284 deletions(-)
diff --git a/mali_kbase/Kbuild b/mali_kbase/Kbuild
index a28fc11..2775ef8 100644
--- a/mali_kbase/Kbuild
+++ b/mali_kbase/Kbuild
@@ -94,11 +94,6 @@ ifeq ($(CONFIG_MALI_CSF_SUPPORT),y)
MALI_JIT_PRESSURE_LIMIT_BASE = 0
MALI_USE_CSF = 1
ccflags-y += -DCONFIG_MALI_PIXEL_GPU_SSCD
-ifeq ($(CONFIG_SOC_GS201),y)
-ifeq ($(CONFIG_MALI_HOST_CONTROLS_SC_RAILS),y)
- ccflags-y += -DCONFIG_MALI_HOST_CONTROLS_SC_RAILS
-endif
-endif
else
MALI_JIT_PRESSURE_LIMIT_BASE ?= 1
MALI_USE_CSF ?= 0
diff --git a/mali_kbase/Kconfig b/mali_kbase/Kconfig
index 0ea576e..c49c49b 100644
--- a/mali_kbase/Kconfig
+++ b/mali_kbase/Kconfig
@@ -376,15 +376,6 @@ config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
tree using the property, opp-mali-errata-1485982. Otherwise the
slowest clock will be selected.
-config MALI_HOST_CONTROLS_SC_RAILS
- bool "Enable Host based control of the shader core power rails"
- depends on MALI_CSF_SUPPORT
- default n
- help
- This option enables the Host based control of the power rails for
- shader cores. It is recommended to use PDCA (Power Domain Control
- Adapter) inside the GPU to handshake with SoC PMU to control the
- power of cores.
endif
config MALI_ARBITRATION
diff --git a/mali_kbase/Makefile b/mali_kbase/Makefile
index beecca2..9259b89 100644
--- a/mali_kbase/Makefile
+++ b/mali_kbase/Makefile
@@ -131,7 +131,6 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
CONFIG_MALI_ERROR_INJECT = n
CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED = n
CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE = n
- CONFIG_MALI_HOST_CONTROLS_SC_RAILS = n
CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS = n
CONFIG_MALI_DEBUG = n
CONFIG_MALI_MIDGARD_ENABLE_TRACE = n
@@ -193,7 +192,6 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
CONFIG_MALI_ERROR_INJECT \
CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED \
CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE \
- CONFIG_MALI_HOST_CONTROLS_SC_RAILS \
CONFIG_MALI_PRFCNT_SET_PRIMARY \
CONFIG_MALI_PRFCNT_SET_SECONDARY \
CONFIG_MALI_PRFCNT_SET_TERTIARY \
diff --git a/mali_kbase/Mconfig b/mali_kbase/Mconfig
index 342ac8c..b3c5323 100644
--- a/mali_kbase/Mconfig
+++ b/mali_kbase/Mconfig
@@ -352,16 +352,6 @@ config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
tree using the property, opp-mali-errata-1485982. Otherwise the
slowest clock will be selected.
-config MALI_HOST_CONTROLS_SC_RAILS
- bool "Enable Host based control of the shader core power rails"
- depends on MALI_EXPERT && MALI_CSF_SUPPORT
- default n
- help
- This option enables the Host based control of the power rails for
- shader cores. It is recommended to use PDCA (Power Domain Control
- Adapter) inside the GPU to handshake with SoC PMU to control the
- power of cores.
-
config MALI_TRACE_POWER_GPU_WORK_PERIOD
bool "Enable per-application GPU metrics tracepoints"
depends on MALI_MIDGARD
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index 0cb205b..d8d55f3 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -75,12 +75,6 @@ int kbase_pm_runtime_init(struct kbase_device *kbdev)
callbacks->power_runtime_gpu_idle_callback;
kbdev->pm.backend.callback_power_runtime_gpu_active =
callbacks->power_runtime_gpu_active_callback;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- kbdev->pm.backend.callback_power_on_sc_rails =
- callbacks->power_on_sc_rails_callback;
- kbdev->pm.backend.callback_power_off_sc_rails =
- callbacks->power_off_sc_rails_callback;
-#endif
if (callbacks->power_runtime_init_callback)
return callbacks->power_runtime_init_callback(kbdev);
@@ -101,10 +95,6 @@ int kbase_pm_runtime_init(struct kbase_device *kbdev)
kbdev->pm.backend.callback_hardware_reset = NULL;
kbdev->pm.backend.callback_power_runtime_gpu_idle = NULL;
kbdev->pm.backend.callback_power_runtime_gpu_active = NULL;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- kbdev->pm.backend.callback_power_on_sc_rails = NULL;
- kbdev->pm.backend.callback_power_off_sc_rails = NULL;
-#endif
return 0;
}
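
Note: the two pointers unhooked above were supplied by the platform
integration through struct kbase_pm_callback_conf. A minimal sketch of
the glue that this removal obsoletes (only the two *_sc_rails_callback
field names come from the driver; everything else is illustrative):

    /* Hypothetical platform glue for host-controlled SC rails. */
    static void plat_sc_rails_on(struct kbase_device *kbdev)
    {
            /* Handshake with the SoC PMU to power up the shader-core rail. */
    }

    static void plat_sc_rails_off(struct kbase_device *kbdev)
    {
            /* Cut the shader-core rail once the cores are safely off. */
    }

    static struct kbase_pm_callback_conf pm_callbacks = {
            /* ... runtime-PM callbacks elided ... */
            .power_on_sc_rails_callback  = plat_sc_rails_on,
            .power_off_sc_rails_callback = plat_sc_rails_off,
    };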
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
index 9b17092..edec761 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_defs.h
@@ -361,10 +361,6 @@ struct kbase_pm_event_log {
* @callback_power_runtime_gpu_idle was
* called previously.
* See &struct kbase_pm_callback_conf.
- * @callback_power_on_sc_rails: Callback invoked to turn on the shader core
- * power rails. See &struct kbase_pm_callback_conf.
- * @callback_power_off_sc_rails: Callback invoked to turn off the shader core
- * power rails. See &struct kbase_pm_callback_conf.
* @ca_cores_enabled: Cores that are currently available
* @apply_hw_issue_TITANHW_2938_wa: Indicates if the workaround for BASE_HW_ISSUE_TITANHW_2938
* needs to be applied when unmapping memory from GPU.
@@ -536,10 +532,6 @@ struct kbase_pm_backend_data {
void (*callback_hardware_reset)(struct kbase_device *kbdev);
void (*callback_power_runtime_gpu_idle)(struct kbase_device *kbdev);
void (*callback_power_runtime_gpu_active)(struct kbase_device *kbdev);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- void (*callback_power_on_sc_rails)(struct kbase_device *kbdev);
- void (*callback_power_off_sc_rails)(struct kbase_device *kbdev);
-#endif
u64 ca_cores_enabled;
@@ -558,11 +550,6 @@ struct kbase_pm_backend_data {
struct mutex policy_change_lock;
struct workqueue_struct *core_idle_wq;
struct work_struct core_idle_work;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- struct work_struct sc_rails_on_work;
- bool sc_power_rails_off;
- bool sc_pwroff_safe;
-#endif
#ifdef KBASE_PM_RUNTIME
bool gpu_sleep_supported;
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index 142f819..4637861 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -802,10 +802,6 @@ static void kbasep_pm_toggle_power_interrupt(struct kbase_device *kbdev, bool en
irq_mask = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_IRQ_MASK));
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* For IFPO, we require the POWER_CHANGED_ALL interrupt to be always on */
- enable = true;
-#endif
if (enable) {
irq_mask |= POWER_CHANGED_ALL;
kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(GPU_IRQ_CLEAR), POWER_CHANGED_ALL);
@@ -938,14 +934,6 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
backend->shaders_avail = backend->shaders_desired_mask;
backend->pm_shaders_core_mask = 0;
if (kbdev->csf.firmware_hctl_core_pwr) {
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* On rail up, this state machine will be re-invoked */
- if (backend->sc_power_rails_off) {
- /* The work should already be queued or executing */
- WARN_ON(!work_busy(&backend->sc_rails_on_work));
- break;
- }
-#endif
kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
backend->shaders_avail, ACTION_PWRON);
backend->mcu_state = KBASE_MCU_HCTL_SHADERS_PEND_ON;
@@ -1311,31 +1299,6 @@ static void core_idle_worker(struct work_struct *work)
}
#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static void sc_rails_on_worker(struct work_struct *work)
-{
- struct kbase_device *kbdev =
- container_of(work, struct kbase_device, pm.backend.sc_rails_on_work);
- unsigned long flags;
-
- /*
- * Intentionally not synchronized using the scheduler.lock, as the scheduler may be waiting
- * on the SC rail to power up
- */
- kbase_pm_lock(kbdev);
-
- kbase_pm_turn_on_sc_power_rails_locked(kbdev);
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- /* Push the state machine forward in case it was waiting on SC rail power up */
- kbase_pm_update_state(kbdev);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- kbase_pm_unlock(kbdev);
-}
-#endif /* CONFIG_MALI_HOST_CONTROLS_SC_RAILS */
-
-
static const char *kbase_l2_core_state_to_string(enum kbase_l2_core_state state)
{
const char *const strings[] = {
@@ -2475,9 +2438,6 @@ int kbase_pm_state_machine_init(struct kbase_device *kbdev)
}
INIT_WORK(&kbdev->pm.backend.core_idle_work, core_idle_worker);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- INIT_WORK(&kbdev->pm.backend.sc_rails_on_work, sc_rails_on_worker);
-#endif
#endif
return 0;
@@ -2591,10 +2551,6 @@ void kbase_gpu_timeout_debug_message(struct kbase_device *kbdev, const char *tim
kbase_pm_is_l2_desired(kbdev), kbdev->pm.backend.policy_change_clamp_state_to_off);
dev_err(kbdev->dev, "\tL2 sw state = %d\n",
kbdev->pm.backend.l2_state);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- dev_err(kbdev->dev, "\tbackend.sc_power_rails_off = %d\n",
- kbdev->pm.backend.sc_power_rails_off);
-#endif
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif
dev_err(kbdev->dev, "Current state :\n");
@@ -3642,11 +3598,6 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbdev->pm.backend.gpu_powered = true;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* Ensure the SC rail is up otherwise the FW will get stuck during reset */
- kbase_pm_turn_on_sc_power_rails_locked(kbdev);
-#endif
-
/* Ensure interrupts are off to begin with, this also clears any
* outstanding interrupts
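
Note on the first hunk in this file: with IFPO gone, POWER_CHANGED_ALL
goes back to being masked and unmasked on demand instead of being
pinned on. The surrounding idiom, paraphrased from the hunk (the
disable branch and the final mask write are inferred, not quoted from
this diff):

    irq_mask = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_IRQ_MASK));
    if (enable) {
            irq_mask |= POWER_CHANGED_ALL;
            /* Clear any stale latched event before unmasking it. */
            kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(GPU_IRQ_CLEAR),
                              POWER_CHANGED_ALL);
    } else {
            irq_mask &= ~POWER_CHANGED_ALL;
    }
    kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(GPU_IRQ_MASK), irq_mask);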
diff --git a/mali_kbase/build.bp b/mali_kbase/build.bp
index a0bb99e..77e193a 100644
--- a/mali_kbase/build.bp
+++ b/mali_kbase/build.bp
@@ -125,9 +125,6 @@ bob_defaults {
mali_hw_errata_1485982_use_clock_alternative: {
kbuild_options: ["CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE=y"],
},
- mali_host_controls_sc_rails: {
- kbuild_options: ["CONFIG_MALI_HOST_CONTROLS_SC_RAILS=y"],
- },
platform_is_fpga: {
kbuild_options: ["CONFIG_MALI_IS_FPGA=y"],
},
diff --git a/mali_kbase/csf/mali_kbase_csf.c b/mali_kbase/csf/mali_kbase_csf.c
index 7fcd45e..6ebbd1e 100644
--- a/mali_kbase/csf/mali_kbase_csf.c
+++ b/mali_kbase/csf/mali_kbase_csf.c
@@ -1985,13 +1985,6 @@ static void kbase_queue_oom_event(struct kbase_queue *const queue)
kbase_csf_scheduler_lock(kbdev);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (kbdev->csf.scheduler.sc_power_rails_off) {
- dev_warn(kctx->kbdev->dev, "SC power rails off unexpectedly when handling OoM event");
- goto unlock;
- }
-#endif
-
slot_num = kbase_csf_scheduler_group_get_slot(group);
/* The group could have gone off slot before this work item got
@@ -3030,17 +3023,9 @@ void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)
/* Handle IDLE Hysteresis notification event */
if ((glb_req ^ glb_ack) & GLB_REQ_IDLE_EVENT_MASK) {
dev_dbg(kbdev->dev, "Idle-hysteresis event flagged");
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (kbase_csf_scheduler_process_gpu_idle_event(kbdev)) {
- kbase_csf_firmware_global_input_mask(
- global_iface, GLB_REQ, glb_ack,
- GLB_REQ_IDLE_EVENT_MASK);
- }
-#else
kbase_csf_firmware_global_input_mask(
global_iface, GLB_REQ, glb_ack,
GLB_REQ_IDLE_EVENT_MASK);
-#endif
glb_idle_irq_received = true;
/* Defer handling this IRQ to account for a race condition
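
Note: the idle-hysteresis hunk above relies on the usual GLB handshake
idiom. A request bit differs between GLB_REQ and GLB_ACK while it is in
flight, and copying the ack value back into the masked REQ bit
acknowledges it. Condensed sketch of the post-patch path, using only
calls visible in this diff:

    glb_req = kbase_csf_firmware_global_input_read(global_iface, GLB_REQ);
    glb_ack = kbase_csf_firmware_global_output(global_iface, GLB_ACK);

    /* The idle event is pending iff the REQ and ACK bits disagree. */
    if ((glb_req ^ glb_ack) & GLB_REQ_IDLE_EVENT_MASK)
            kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ,
                                                 glb_ack,
                                                 GLB_REQ_IDLE_EVENT_MASK);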
diff --git a/mali_kbase/csf/mali_kbase_csf.h b/mali_kbase/csf/mali_kbase_csf.h
index e47d18f..8e067cb 100644
--- a/mali_kbase/csf/mali_kbase_csf.h
+++ b/mali_kbase/csf/mali_kbase_csf.h
@@ -49,7 +49,6 @@
#define KBASEP_TICK_PROTM_PEND_SCAN_SEQ_NR_INVALID (U32_MAX)
/* 60ms optimizes power while minimizing latency impact for UI test cases. */
-#define MALI_HOST_CONTROLS_SC_RAILS_IDLE_TIMER_NS (600 * 1000)
#define FIRMWARE_IDLE_HYSTERESIS_TIME_NS (60 * 1000 * 1000) /* Default 60 milliseconds */
/* Idle hysteresis time can be scaled down when GPU sleep feature is used */
diff --git a/mali_kbase/csf/mali_kbase_csf_defs.h b/mali_kbase/csf/mali_kbase_csf_defs.h
index 082a214..018bf89 100644
--- a/mali_kbase/csf/mali_kbase_csf_defs.h
+++ b/mali_kbase/csf/mali_kbase_csf_defs.h
@@ -1092,21 +1092,6 @@ struct kbase_csf_mcu_shared_regions {
* protected mode execution compared to other such
* groups. It is updated on every tick/tock.
* @interrupt_lock is used to serialize the access.
- * @sc_rails_off_work: Work item enqueued on GPU idle notification to
- * turn off the shader core power rails.
- * @sc_power_rails_off: Flag to keep a track of the status of shader core
- * power rails, set to true when power rails are
- * turned off.
- * @gpu_idle_work_pending: Flag to indicate that the power down of GPU is
- * pending and it is set after turning off the
- * shader core power rails. The power down is skipped
- * if the flag is cleared. @lock is used to serialize
- * the access. Scheduling actions are skipped whilst
- * this flag is set.
- * @gpu_idle_fw_timer_enabled: Flag to keep a track if GPU idle event reporting
- * is disabled on FW side. It is set for the power
- * policy where the power managment of shader cores
- * needs to be done by the Host.
* @csg_scan_sched_count: Scheduling action counter used to assign the sched_act_seq_num
* for each group added to Scheduler's schedulable list in a
* tick/tock.
@@ -1151,11 +1136,7 @@ struct kbase_csf_scheduler {
struct kbase_context *top_kctx;
struct kbase_queue_group *top_grp;
struct kbase_queue_group *active_protm_grp;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- struct delayed_work gpu_idle_work;
-#else
struct work_struct gpu_idle_work;
-#endif
struct workqueue_struct *idle_wq;
bool fast_gpu_idle_handling;
atomic_t gpu_no_longer_idle;
@@ -1164,12 +1145,6 @@ struct kbase_csf_scheduler {
u32 pm_active_count;
unsigned int csg_scheduling_period_ms;
u32 tick_protm_pending_seq;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- struct work_struct sc_rails_off_work;
- bool sc_power_rails_off;
- bool gpu_idle_work_pending;
- bool gpu_idle_fw_timer_enabled;
-#endif
u32 csg_scan_sched_count;
ktime_t protm_enter_time;
struct kbase_csf_sched_heap_reclaim_mgr reclaim_mgr;
diff --git a/mali_kbase/csf/mali_kbase_csf_firmware.c b/mali_kbase/csf/mali_kbase_csf_firmware.c
index 407a06e..2d56d03 100644
--- a/mali_kbase/csf/mali_kbase_csf_firmware.c
+++ b/mali_kbase/csf/mali_kbase_csf_firmware.c
@@ -1774,13 +1774,6 @@ static void global_init(struct kbase_device *const kbdev, u64 core_mask)
const struct kbase_csf_global_iface *const global_iface = &kbdev->csf.global_iface;
unsigned long flags;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* If the power_policy will grant host control over FW PM, we need to turn on the SC rail*/
- if (kbdev->csf.firmware_hctl_core_pwr) {
- queue_work(system_highpri_wq, &kbdev->pm.backend.sc_rails_on_work);
- }
-#endif
-
kbase_csf_scheduler_spin_lock(kbdev, &flags);
kbasep_enable_rtu(kbdev);
@@ -1789,13 +1782,11 @@ static void global_init(struct kbase_device *const kbdev, u64 core_mask)
enable_endpoints_global(global_iface, core_mask);
set_shader_poweroff_timer(kbdev, global_iface);
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
/* The GPU idle timer is always enabled for simplicity. Checks will be
* done before scheduling the GPU idle worker to see if it is
* appropriate for the current power policy.
*/
enable_gpu_idle_timer(kbdev);
-#endif
set_timeout_global(global_iface, kbase_csf_timeout_get(kbdev));
@@ -1867,9 +1858,7 @@ void kbase_csf_firmware_global_reinit(struct kbase_device *kbdev, u64 core_mask)
bool kbase_csf_firmware_global_reinit_complete(struct kbase_device *kbdev)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
WARN_ON(!kbdev->csf.glb_init_request_pending);
-#endif
if (global_request_complete(kbdev, CSF_GLB_REQ_CFG_MASK))
kbdev->csf.glb_init_request_pending = false;
@@ -1936,12 +1925,6 @@ static void kbase_csf_firmware_reload_worker(struct work_struct *work)
if (WARN_ON(err))
return;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- err = kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails(kbdev);
- if (WARN_ON(err))
- return;
-#endif
-
err = kbase_csf_firmware_cfg_fw_wa_enable(kbdev);
if (WARN_ON(err))
return;
@@ -2052,11 +2035,7 @@ u32 kbase_csf_firmware_set_gpu_idle_hysteresis_time(struct kbase_device *kbdev,
unsigned long flags;
u32 no_modifier = 0;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- const u32 hysteresis_val = convert_dur_to_idle_count(kbdev, MALI_HOST_CONTROLS_SC_RAILS_IDLE_TIMER_NS, &no_modifier);
-#else
const u32 hysteresis_val = convert_dur_to_idle_count(kbdev, dur_ns, &no_modifier);
-#endif
/* The 'fw_load_lock' is taken to synchronize against the deferred
* loading of FW, where the idle timer will be enabled.
@@ -2089,7 +2068,6 @@ u32 kbase_csf_firmware_set_gpu_idle_hysteresis_time(struct kbase_device *kbdev,
return kbdev->csf.gpu_idle_dur_count;
}
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
/* The 'reg_lock' is also taken and is held till the update is not
* complete, to ensure the update of idle timer value by multiple Users
* gets serialized.
@@ -2098,43 +2076,24 @@ u32 kbase_csf_firmware_set_gpu_idle_hysteresis_time(struct kbase_device *kbdev,
/* The firmware only reads the new idle timer value when the timer is
* disabled.
*/
-#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- kbase_csf_scheduler_lock(kbdev);
- if (kbdev->csf.scheduler.gpu_idle_fw_timer_enabled) {
-#endif
- /* The firmware only reads the new idle timer value when the timer is
- * disabled.
- */
- kbase_csf_scheduler_spin_lock(kbdev, &flags);
- kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
- kbase_csf_scheduler_spin_unlock(kbdev, flags);
- /* Ensure that the request has taken effect */
- wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);
+ /* The firmware only reads the new idle timer value when the timer is
+ * disabled.
+ */
+ kbase_csf_scheduler_spin_lock(kbdev, &flags);
+ kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
+ kbase_csf_scheduler_spin_unlock(kbdev, flags);
+ /* Ensure that the request has taken effect */
+ wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);
- kbase_csf_scheduler_spin_lock(kbdev, &flags);
- kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
- kbdev->csf.gpu_idle_dur_count = hysteresis_val;
- kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
- kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
- kbase_csf_scheduler_spin_unlock(kbdev, flags);
- wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- } else {
- /* Record the new values. Would be used later when timer is
- * enabled
- */
- kbase_csf_scheduler_spin_lock(kbdev, &flags);
- kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
- kbdev->csf.gpu_idle_dur_count = hysteresis_val;
- kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
- kbase_csf_scheduler_spin_unlock(kbdev, flags);
- }
- kbase_csf_scheduler_unlock(kbdev);
-#else
+ kbase_csf_scheduler_spin_lock(kbdev, &flags);
+ kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
+ kbdev->csf.gpu_idle_dur_count = hysteresis_val;
+ kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
+ kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
+ kbase_csf_scheduler_spin_unlock(kbdev, flags);
+ wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
mutex_unlock(&kbdev->csf.reg_lock);
-#endif
dev_dbg(kbdev->dev, "GPU suspend timeout updated: 0x%lld ns (0x%.8x)",
kbdev->csf.gpu_idle_hysteresis_ns,
@@ -2324,17 +2283,10 @@ int kbase_csf_firmware_early_init(struct kbase_device *kbdev)
init_waitqueue_head(&kbdev->csf.event_wait);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* Set to the lowest possible value for FW to immediately write
- * to the power off register to disable the cores.
- */
- kbdev->csf.mcu_core_pwroff_dur_count = 1;
-#else
kbdev->csf.mcu_core_pwroff_dur_ns = DEFAULT_GLB_PWROFF_TIMEOUT_NS;
kbdev->csf.mcu_core_pwroff_dur_count = convert_dur_to_core_pwroff_count(
kbdev, DEFAULT_GLB_PWROFF_TIMEOUT_NS, &no_modifier);
kbdev->csf.mcu_core_pwroff_dur_count_no_modifier = no_modifier;
-#endif
kbase_csf_firmware_reset_mcu_core_pwroff_time(kbdev);
INIT_LIST_HEAD(&kbdev->csf.firmware_interfaces);
@@ -2375,15 +2327,6 @@ int kbase_csf_firmware_late_init(struct kbase_device *kbdev)
#endif
WARN_ON(!kbdev->csf.gpu_idle_hysteresis_ns);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- kbdev->csf.gpu_idle_dur_count = convert_dur_to_idle_count(
- kbdev, MALI_HOST_CONTROLS_SC_RAILS_IDLE_TIMER_NS, &no_modifier);
-
- /* Set to the lowest possible value for FW to immediately write
- * to the power off register to disable the cores.
- */
- kbdev->csf.mcu_core_pwroff_dur_count = 1;
-#else
kbdev->csf.gpu_idle_dur_count = convert_dur_to_idle_count(
kbdev, kbdev->csf.gpu_idle_hysteresis_ns, &no_modifier);
kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
@@ -2391,7 +2334,6 @@ int kbase_csf_firmware_late_init(struct kbase_device *kbdev)
kbdev->csf.mcu_core_pwroff_dur_count = convert_dur_to_core_pwroff_count(
kbdev, DEFAULT_GLB_PWROFF_TIMEOUT_NS, &no_modifier);
kbdev->csf.mcu_core_pwroff_dur_count_no_modifier = no_modifier;
-#endif
return 0;
}
@@ -2566,14 +2508,6 @@ int kbase_csf_firmware_load_init(struct kbase_device *kbdev)
goto err_out;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- ret = kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails(kbdev);
- if (ret != 0) {
- dev_err(kbdev->dev, "Failed to enable SC PM WA");
- goto error;
- }
-#endif
-
ret = kbase_csf_firmware_cfg_fw_wa_init(kbdev);
if (ret != 0) {
dev_err(kbdev->dev, "Failed to initialize firmware workarounds");
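
Note: the net effect of the large hunk in
kbase_csf_firmware_set_gpu_idle_hysteresis_time() is that only the
non-IFPO path survives. Firmware only latches a new idle count while
the timer is disabled, so the sequence is disable, wait for the ack,
store the new values, re-enable, wait again, all under reg_lock to
serialise concurrent updaters. Condensed from the '+' lines above:

    mutex_lock(&kbdev->csf.reg_lock);

    kbase_csf_scheduler_spin_lock(kbdev, &flags);
    kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
    kbase_csf_scheduler_spin_unlock(kbdev, flags);
    wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);

    kbase_csf_scheduler_spin_lock(kbdev, &flags);
    kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
    kbdev->csf.gpu_idle_dur_count = hysteresis_val;
    kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
    kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
    kbase_csf_scheduler_spin_unlock(kbdev, flags);
    wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);

    mutex_unlock(&kbdev->csf.reg_lock);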
diff --git a/mali_kbase/csf/mali_kbase_csf_firmware_cfg.c b/mali_kbase/csf/mali_kbase_csf_firmware_cfg.c
index 96a1481..70c70d2 100644
--- a/mali_kbase/csf/mali_kbase_csf_firmware_cfg.c
+++ b/mali_kbase/csf/mali_kbase_csf_firmware_cfg.c
@@ -31,10 +31,6 @@
#define CSF_FIRMWARE_CFG_LOG_VERBOSITY_ENTRY_NAME "Log verbosity"
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-#define HOST_CONTROLS_SC_RAILS_CFG_ENTRY_NAME "Host controls SC rails"
-#endif
-
#define CSF_FIRMWARE_CFG_WA_CFG0_ENTRY_NAME "WA_CFG0"
/**
@@ -138,11 +134,6 @@ static ssize_t store_fw_cfg(struct kobject *kobj, struct attribute *attr, const
return -EINVAL;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (!strcmp(config->name,
- HOST_CONTROLS_SC_RAILS_CFG_ENTRY_NAME))
- return -EPERM;
-#endif
if (!strcmp(config->name, CSF_FIRMWARE_CFG_WA_CFG0_ENTRY_NAME))
return -EPERM;
@@ -380,24 +371,6 @@ int kbase_csf_firmware_cfg_fw_wa_enable(struct kbase_device *kbdev)
return -ENOENT;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-int kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails(struct kbase_device *kbdev)
-{
- struct firmware_config *config;
-
- list_for_each_entry(config, &kbdev->csf.firmware_config, node) {
- if (strcmp(config->name,
- HOST_CONTROLS_SC_RAILS_CFG_ENTRY_NAME))
- continue;
-
- kbase_csf_update_firmware_memory(kbdev, config->address, 1);
- return 0;
- }
-
- return -ENOENT;
-}
-#endif
-
int kbase_csf_firmware_cfg_fw_wa_init(struct kbase_device *kbdev)
{
int ret;
@@ -465,13 +438,6 @@ int kbase_csf_firmware_cfg_option_entry_parse(struct kbase_device *kbdev,
return 0;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-int kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails(struct kbase_device *kbdev)
-{
- return 0;
-}
-#endif
-
int kbase_csf_firmware_cfg_fw_wa_enable(struct kbase_device *kbdev)
{
return 0;
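
Note: the deleted kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails()
was one instance of a generic pattern, namely walking the parsed
firmware config list and poking a named entry. A hedged sketch of that
pattern (set_fw_cfg_by_name is a hypothetical name; the calls it makes
are taken from this file):

    static int set_fw_cfg_by_name(struct kbase_device *kbdev,
                                  const char *name, u32 value)
    {
            struct firmware_config *config;

            list_for_each_entry(config, &kbdev->csf.firmware_config, node) {
                    if (strcmp(config->name, name))
                            continue;

                    kbase_csf_update_firmware_memory(kbdev, config->address,
                                                     value);
                    return 0;
            }

            return -ENOENT; /* no entry with that name in the FW image */
    }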
diff --git a/mali_kbase/csf/mali_kbase_csf_firmware_cfg.h b/mali_kbase/csf/mali_kbase_csf_firmware_cfg.h
index f565290..6dee708 100644
--- a/mali_kbase/csf/mali_kbase_csf_firmware_cfg.h
+++ b/mali_kbase/csf/mali_kbase_csf_firmware_cfg.h
@@ -70,22 +70,6 @@ int kbase_csf_firmware_cfg_option_entry_parse(struct kbase_device *kbdev,
const struct kbase_csf_mcu_fw *const fw,
const u32 *entry, unsigned int size, bool updatable);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-/**
- * kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails() - Enable the config in FW to support
- * Host based control of SC power rails
- *
- * Look for the config entry that enables support in FW for the Host based
- * control of shader core power rails and set it before the intial boot
- * or reload of firmware.
- *
- * @kbdev: Kbase device structure
- *
- * Return: 0 if successful, negative error code on failure
- */
-int kbase_csf_firmware_cfg_enable_host_ctrl_sc_rails(struct kbase_device *kbdev);
-#endif
-
/**
* kbase_csf_firmware_cfg_find_config_address() - Get a FW config option address
*
diff --git a/mali_kbase/csf/mali_kbase_csf_firmware_no_mali.c b/mali_kbase/csf/mali_kbase_csf_firmware_no_mali.c
index 5cdc1bd..8edf031 100644
--- a/mali_kbase/csf/mali_kbase_csf_firmware_no_mali.c
+++ b/mali_kbase/csf/mali_kbase_csf_firmware_no_mali.c
@@ -768,14 +768,11 @@ static void global_init(struct kbase_device *const kbdev, u64 core_mask)
set_timeout_global(global_iface, kbase_csf_timeout_get(kbdev));
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
/* The GPU idle timer is always enabled for simplicity. Checks will be
* done before scheduling the GPU idle worker to see if it is
* appropriate for the current power policy.
*/
enable_gpu_idle_timer(kbdev);
-#endif
-
/* Unmask the interrupts */
kbase_csf_firmware_global_input(global_iface, GLB_ACK_IRQ_MASK, ack_irq_mask);
@@ -992,49 +989,29 @@ u32 kbase_csf_firmware_set_gpu_idle_hysteresis_time(struct kbase_device *kbdev,
return kbdev->csf.gpu_idle_dur_count;
}
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
/* The 'reg_lock' is also taken and is held till the update is not
* complete, to ensure the update of idle timer value by multiple Users
* gets serialized.
*/
mutex_lock(&kbdev->csf.reg_lock);
-#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- kbase_csf_scheduler_lock(kbdev);
- if (kbdev->csf.scheduler.gpu_idle_fw_timer_enabled) {
-#endif /* CONFIG_MALI_HOST_CONTROLS_SC_RAILS */
- /* The firmware only reads the new idle timer value when the timer is
- * disabled.
- */
- kbase_csf_scheduler_spin_lock(kbdev, &flags);
- kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
- kbase_csf_scheduler_spin_unlock(kbdev, flags);
- /* Ensure that the request has taken effect */
- wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);
+ /* The firmware only reads the new idle timer value when the timer is
+ * disabled.
+ */
+ kbase_csf_scheduler_spin_lock(kbdev, &flags);
+ kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
+ kbase_csf_scheduler_spin_unlock(kbdev, flags);
+ /* Ensure that the request has taken effect */
+ wait_for_global_request(kbdev, GLB_REQ_IDLE_DISABLE_MASK);
- kbase_csf_scheduler_spin_lock(kbdev, &flags);
- kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
- kbdev->csf.gpu_idle_dur_count = hysteresis_val;
- kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
- kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
- kbase_csf_scheduler_spin_unlock(kbdev, flags);
- wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- } else {
- /* Record the new values. Would be used later when timer is
- * enabled
- */
- kbase_csf_scheduler_spin_lock(kbdev, &flags);
- kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
- kbdev->csf.gpu_idle_dur_count = hysteresis_val;
- kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
- kbase_csf_scheduler_spin_unlock(kbdev, flags);
- }
- kbase_csf_scheduler_unlock(kbdev);
-#else
+ kbase_csf_scheduler_spin_lock(kbdev, &flags);
+ kbdev->csf.gpu_idle_hysteresis_ns = dur_ns;
+ kbdev->csf.gpu_idle_dur_count = hysteresis_val;
+ kbdev->csf.gpu_idle_dur_count_no_modifier = no_modifier;
+ kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
+ kbase_csf_scheduler_spin_unlock(kbdev, flags);
+ wait_for_global_request(kbdev, GLB_REQ_IDLE_ENABLE_MASK);
mutex_unlock(&kbdev->csf.reg_lock);
-#endif
kbase_csf_scheduler_pm_idle(kbdev);
kbase_reset_gpu_allow(kbdev);
diff --git a/mali_kbase/csf/mali_kbase_csf_scheduler.c b/mali_kbase/csf/mali_kbase_csf_scheduler.c
index 6dc4c20..f7f7ed0 100644
--- a/mali_kbase/csf/mali_kbase_csf_scheduler.c
+++ b/mali_kbase/csf/mali_kbase_csf_scheduler.c
@@ -93,124 +93,10 @@ static void scheduler_enable_tick_timer_nolock(struct kbase_device *kbdev);
static int suspend_active_queue_groups(struct kbase_device *kbdev, unsigned long *slot_mask);
static int suspend_active_groups_on_powerdown(struct kbase_device *kbdev, bool system_suspend);
static void schedule_in_cycle(struct kbase_queue_group *group, bool force);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static bool evaluate_sync_update(struct kbase_queue *queue);
-#endif
static bool queue_group_scheduled_locked(struct kbase_queue_group *group);
#define kctx_as_enabled(kctx) (!kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT))
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-void turn_on_sc_power_rails(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->csf.scheduler.lock);
-
- WARN_ON(kbdev->csf.scheduler.state == SCHED_SUSPENDED);
-
- if (kbdev->csf.scheduler.sc_power_rails_off) {
- if (kbdev->pm.backend.callback_power_on_sc_rails)
- kbdev->pm.backend.callback_power_on_sc_rails(kbdev);
- kbdev->csf.scheduler.sc_power_rails_off = false;
- }
-}
-
-/**
- * turn_off_sc_power_rails - Turn off the shader core power rails.
- *
- * @kbdev: Pointer to the device.
- *
- * This function is called to synchronously turn off the shader core power rails.
- */
-static void turn_off_sc_power_rails(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->csf.scheduler.lock);
-
- WARN_ON(kbdev->csf.scheduler.state == SCHED_SUSPENDED);
-
- if (!kbdev->csf.scheduler.sc_power_rails_off) {
- if (kbdev->pm.backend.callback_power_off_sc_rails)
- kbdev->pm.backend.callback_power_off_sc_rails(kbdev);
- kbdev->csf.scheduler.sc_power_rails_off = true;
- }
-}
-
-/**
- * gpu_idle_event_is_pending - Check if there is a pending GPU idle event
- *
- * @kbdev: Pointer to the device.
- */
-static bool gpu_idle_event_is_pending(struct kbase_device *kbdev)
-{
- struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;
-
- lockdep_assert_held(&kbdev->csf.scheduler.lock);
- lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);
-
- return (kbase_csf_firmware_global_input_read(global_iface, GLB_REQ) ^
- kbase_csf_firmware_global_output(global_iface, GLB_ACK)) &
- GLB_REQ_IDLE_EVENT_MASK;
-}
-
-/**
- * ack_gpu_idle_event - Acknowledge the GPU idle event
- *
- * @kbdev: Pointer to the device.
- *
- * This function is called to acknowledge the GPU idle event. It is expected
- * that firmware will re-enable the User submission only when it receives a
- * CSI kernel doorbell after the idle event acknowledgement.
- */
-static void ack_gpu_idle_event(struct kbase_device *kbdev)
-{
- struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;
- u32 glb_req, glb_ack;
- unsigned long flags;
-
- lockdep_assert_held(&kbdev->csf.scheduler.lock);
-
- spin_lock_irqsave(&kbdev->csf.scheduler.interrupt_lock, flags);
- glb_req = kbase_csf_firmware_global_input_read(global_iface, GLB_REQ);
- glb_ack = kbase_csf_firmware_global_output(global_iface, GLB_ACK);
- if ((glb_req ^ glb_ack) & GLB_REQ_IDLE_EVENT_MASK) {
- kbase_csf_firmware_global_input_mask(
- global_iface, GLB_REQ, glb_ack,
- GLB_REQ_IDLE_EVENT_MASK);
- }
- spin_unlock_irqrestore(&kbdev->csf.scheduler.interrupt_lock, flags);
-}
-
-static void cancel_gpu_idle_work(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->csf.scheduler.lock);
-
- kbdev->csf.scheduler.gpu_idle_work_pending = false;
- cancel_delayed_work(&kbdev->csf.scheduler.gpu_idle_work);
-}
-
-static bool queue_empty_or_blocked(struct kbase_queue *queue)
-{
- bool empty = false;
- bool blocked = false;
-
- if (CS_STATUS_WAIT_SYNC_WAIT_GET(queue->status_wait)) {
- if (!evaluate_sync_update(queue))
- blocked = true;
- else
- queue->status_wait = 0;
- }
-
- if (!blocked) {
- u64 *input_addr = (u64 *)queue->user_io_addr;
- u64 *output_addr = (u64 *)(queue->user_io_addr + PAGE_SIZE);
-
- empty = (input_addr[CS_INSERT_LO / sizeof(u64)] ==
- output_addr[CS_EXTRACT_LO / sizeof(u64)]);
- }
-
- return (empty || blocked);
-}
-#endif
-
#if IS_ENABLED(CONFIG_MALI_TRACE_POWER_GPU_WORK_PERIOD)
/**
* gpu_metrics_ctx_init() - Take a reference on GPU metrics context if it exists,
@@ -854,7 +740,6 @@ static void scheduler_doorbell_init(struct kbase_device *kbdev)
WARN_ON(doorbell_nr != CSF_KERNEL_DOORBELL_NR);
}
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
/**
* update_on_slot_queues_offsets - Update active queues' INSERT & EXTRACT ofs
*
@@ -905,21 +790,11 @@ static void update_on_slot_queues_offsets(struct kbase_device *kbdev)
}
}
}
-#endif
-static void enqueue_gpu_idle_work(struct kbase_csf_scheduler *const scheduler,
- unsigned long delay)
+static void enqueue_gpu_idle_work(struct kbase_csf_scheduler *const scheduler)
{
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- lockdep_assert_held(&scheduler->lock);
-
- scheduler->gpu_idle_work_pending = true;
- mod_delayed_work(system_highpri_wq, &scheduler->gpu_idle_work, delay);
-#else
- CSTD_UNUSED(delay);
atomic_set(&scheduler->gpu_no_longer_idle, false);
queue_work(scheduler->idle_wq, &scheduler->gpu_idle_work);
-#endif
}
bool kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev)
@@ -935,18 +810,6 @@ bool kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev)
KBASE_KTRACE_ADD(kbdev, SCHEDULER_GPU_IDLE_EVENT_CAN_SUSPEND, NULL,
(((u64)can_suspend_on_idle) << 32));
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (can_suspend_on_idle) {
- /* If FW is managing the cores then we need to turn off the
- * the power rails.
- */
- if (!kbase_pm_no_mcu_core_pwroff(kbdev)) {
- queue_work(system_highpri_wq,
- &scheduler->sc_rails_off_work);
- ack_gpu_idle_event = false;
- }
- }
-#else
if (can_suspend_on_idle) {
/* fast_gpu_idle_handling is protected by the
* interrupt_lock, which would prevent this from being
@@ -976,14 +839,13 @@ bool kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev)
* finished. It's queued before to reduce the time it takes till execution
* but it'll eventually be blocked by the scheduler->interrupt_lock.
*/
- enqueue_gpu_idle_work(scheduler, 0);
+ enqueue_gpu_idle_work(scheduler);
}
/* The extract offsets are unused in fast GPU idle handling */
if (!scheduler->fast_gpu_idle_handling)
update_on_slot_queues_offsets(kbdev);
-#endif
return invoke_pm_state_machine;
}
@@ -1338,71 +1200,6 @@ static void scheduler_pm_idle_before_sleep(struct kbase_device *kbdev)
}
#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static void enable_gpu_idle_fw_timer(struct kbase_device *kbdev)
-{
- struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
- unsigned long flags;
-
- lockdep_assert_held(&scheduler->lock);
-
- spin_lock_irqsave(&scheduler->interrupt_lock, flags);
- if (!scheduler->gpu_idle_fw_timer_enabled) {
- kbase_csf_firmware_enable_gpu_idle_timer(kbdev);
- scheduler->gpu_idle_fw_timer_enabled = true;
- }
- spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
-}
-
-static void disable_gpu_idle_fw_timer(struct kbase_device *kbdev)
-{
- struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
- unsigned long flags;
-
- lockdep_assert_held(&scheduler->lock);
-
- spin_lock_irqsave(&scheduler->interrupt_lock, flags);
- if (scheduler->gpu_idle_fw_timer_enabled) {
- kbase_csf_firmware_disable_gpu_idle_timer(kbdev);
- scheduler->gpu_idle_fw_timer_enabled = false;
- }
- spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
-}
-
-/**
- * update_gpu_idle_timer_on_scheduler_wakeup() - Update the GPU idle state
- * reporting as per the power policy in use.
- *
- * @kbdev: Pointer to the device
- *
- * This function disables the GPU idle state reporting in FW if as per the
- * power policy the power management of shader cores needs to be done by the
- * Host. This prevents the needless disabling of User submissions in FW on
- * reporting the GPU idle event to Host if power rail for shader cores is
- * controlled by the Host.
- * Scheduler is suspended when switching and out of such power policy, so on
- * the wakeup of Scheduler can enable or disable the GPU idle state reporting.
- */
-static void update_gpu_idle_timer_on_scheduler_wakeup(struct kbase_device *kbdev)
-{
- struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
- unsigned long flags;
-
- lockdep_assert_held(&scheduler->lock);
-
- WARN_ON(scheduler->state != SCHED_SUSPENDED);
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (kbase_pm_no_mcu_core_pwroff(kbdev))
- disable_gpu_idle_fw_timer(kbdev);
- else
- enable_gpu_idle_fw_timer(kbdev);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- return;
-}
-#endif
-
static void scheduler_wakeup(struct kbase_device *kbdev, bool kick)
{
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
@@ -1442,10 +1239,6 @@ static void scheduler_wakeup(struct kbase_device *kbdev, bool kick)
return;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- update_gpu_idle_timer_on_scheduler_wakeup(kbdev);
-#endif
-
scheduler->state = SCHED_INACTIVE;
KBASE_KTRACE_ADD(kbdev, SCHED_INACTIVE, NULL, scheduler->state);
@@ -2078,48 +1871,6 @@ static void program_cs(struct kbase_device *kbdev, struct kbase_queue *queue,
update_hw_active(queue, true);
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static void start_stream_sync(struct kbase_queue *queue)
-{
- struct kbase_queue_group *group = queue->group;
- struct kbase_device *kbdev = queue->kctx->kbdev;
- struct kbase_csf_global_iface *global_iface = &kbdev->csf.global_iface;
- struct kbase_csf_cmd_stream_group_info *ginfo;
- struct kbase_csf_cmd_stream_info *stream;
- int csi_index = queue->csi_index;
- long remaining = kbase_csf_timeout_in_jiffies(kbdev->csf.fw_timeout_ms);
-
- lockdep_assert_held(&kbdev->csf.scheduler.lock);
-
- if (WARN_ON(!group) ||
- WARN_ON(!kbasep_csf_scheduler_group_is_on_slot_locked(group)))
- return;
-
- ginfo = &global_iface->groups[group->csg_nr];
- stream = &ginfo->streams[csi_index];
-
- program_cs(kbdev, queue, true);
-
- /* Timed wait */
- remaining = wait_event_timeout(kbdev->csf.event_wait,
- (CS_ACK_STATE_GET(kbase_csf_firmware_cs_output(stream, CS_ACK))
- == CS_ACK_STATE_START), remaining);
-
- if (!remaining) {
- pixel_gpu_uevent_kmd_error_send(kbdev, GPU_UEVENT_INFO_QUEUE_START);
- dev_warn(kbdev->dev, "[%llu] Timeout (%d ms) waiting for queue to start on csi %d bound to group %d on slot %d",
- kbase_backend_get_cycle_cnt(kbdev), kbdev->csf.fw_timeout_ms,
- csi_index, group->handle, group->csg_nr);
-
- /* TODO GPUCORE-25328: The CSG can't be terminated, the GPU
- * will be reset as a work-around.
- */
- if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE))
- kbase_reset_gpu(kbdev);
- }
-}
-#endif
-
static int onslot_csg_add_new_queue(struct kbase_queue *queue)
{
struct kbase_device *kbdev = queue->kctx->kbdev;
@@ -2173,34 +1924,8 @@ int kbase_csf_scheduler_queue_start(struct kbase_queue *queue)
if (!err) {
queue->enabled = true;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* If the kicked GPU queue can make progress, then only
- * need to abort the GPU power down.
- */
- if (!queue_empty_or_blocked(queue))
- cancel_gpu_idle_work(kbdev);
-#endif
if (kbasep_csf_scheduler_group_is_on_slot_locked(group)) {
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* The shader core power rails need to be turned
- * on before FW resumes the execution on HW and
- * that would happen when the CSI kernel doorbell
- * is rung from the following code.
- */
- turn_on_sc_power_rails(kbdev);
-#endif
if (cs_enabled) {
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- spin_lock_irqsave(&kbdev->csf.scheduler.interrupt_lock,
- flags);
- kbase_csf_ring_cs_kernel_doorbell(kbdev,
- queue->csi_index, group->csg_nr,
- true);
- spin_unlock_irqrestore(
- &kbdev->csf.scheduler.interrupt_lock, flags);
- } else {
- start_stream_sync(queue);
-#else
/* In normal situation, when a queue is
* already running, the queue update
* would be a doorbell kick on user
@@ -2235,7 +1960,6 @@ int kbase_csf_scheduler_queue_start(struct kbase_queue *queue)
rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return (err != -EIO) ? -EBUSY : err;
}
-#endif
}
}
queue_delayed_work(system_long_wq, &kbdev->csf.scheduler.ping_work,
@@ -2792,7 +2516,7 @@ static void remove_group_from_runnable(struct kbase_csf_scheduler *const schedul
cancel_tick_work(scheduler);
WARN_ON(atomic_read(&scheduler->non_idle_offslot_grps));
if (scheduler->state != SCHED_SUSPENDED)
- enqueue_gpu_idle_work(scheduler, 0);
+ enqueue_gpu_idle_work(scheduler);
}
KBASE_KTRACE_ADD_CSF_GRP(kctx->kbdev, SCHEDULER_TOP_GRP, scheduler->top_grp,
scheduler->num_active_address_spaces |
@@ -5136,7 +4860,6 @@ static int suspend_active_groups_on_powerdown(struct kbase_device *kbdev, bool s
return 0;
}
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
/**
* all_on_slot_groups_remained_idle - Live check for all groups' idleness
*
@@ -5191,7 +4914,6 @@ static bool all_on_slot_groups_remained_idle(struct kbase_device *kbdev)
return true;
}
-#endif
static bool scheduler_idle_suspendable(struct kbase_device *kbdev)
{
@@ -5216,7 +4938,6 @@ static bool scheduler_idle_suspendable(struct kbase_device *kbdev)
} else
suspend = kbase_pm_no_runnables_sched_suspendable(kbdev);
-#ifndef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
if (suspend && unlikely(atomic_read(&scheduler->gpu_no_longer_idle)))
suspend = false;
@@ -5229,7 +4950,6 @@ static bool scheduler_idle_suspendable(struct kbase_device *kbdev)
dev_dbg(kbdev->dev, "GPU suspension skipped due to active CSGs");
suspend = false;
}
-#endif
scheduler->fast_gpu_idle_handling = false;
spin_unlock(&scheduler->interrupt_lock);
@@ -5289,11 +5009,6 @@ static bool scheduler_suspend_on_idle(struct kbase_device *kbdev)
return false;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- turn_off_sc_power_rails(kbdev);
- ack_gpu_idle_event(kbdev);
-#endif
-
dev_dbg(kbdev->dev, "Scheduler to be suspended on GPU becoming idle");
scheduler_suspend(kbdev);
cancel_tick_work(scheduler);
@@ -5302,13 +5017,8 @@ static bool scheduler_suspend_on_idle(struct kbase_device *kbdev)
static void gpu_idle_worker(struct work_struct *work)
{
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- struct kbase_device *kbdev = container_of(
- work, struct kbase_device, csf.scheduler.gpu_idle_work.work);
-#else
struct kbase_device *kbdev =
container_of(work, struct kbase_device, csf.scheduler.gpu_idle_work);
-#endif
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
bool scheduler_is_idle_suspendable = false;
bool all_groups_suspended = false;
@@ -5327,13 +5037,6 @@ static void gpu_idle_worker(struct work_struct *work)
kbase_debug_csf_fault_wait_completion(kbdev);
rt_mutex_lock(&scheduler->lock);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (!scheduler->gpu_idle_work_pending)
- goto unlock;
-
- scheduler->gpu_idle_work_pending = false;
-#endif
-
#if IS_ENABLED(CONFIG_DEBUG_FS)
if (unlikely(scheduler->state == SCHED_BUSY)) {
rt_mutex_unlock(&scheduler->lock);
@@ -5357,9 +5060,6 @@ static void gpu_idle_worker(struct work_struct *work)
KBASE_KTRACE_ADD(kbdev, SCHEDULER_GPU_IDLE_WORKER_HANDLING_END, NULL, 0u);
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-unlock:
-#endif
rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
KBASE_KTRACE_ADD(kbdev, SCHEDULER_GPU_IDLE_WORKER_END, NULL,
@@ -5368,265 +5068,6 @@ unlock:
#undef __ENCODE_KTRACE_INFO
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-/**
- * wait_csg_db_ack - Wait for the previously sent CSI kernel DBs for a CSG to
- * get acknowledged.
- *
- * @kbdev: Pointer to the device.
- * @csg_nr: The CSG number.
- *
- * This function is called to wait for the previously sent CSI kernel DBs
- * for a CSG to get acknowledged before acknowledging the GPU idle event.
- * This is to ensure when @sc_rails_off_worker is doing the GPU idleness
- * reevaluation the User submissions remain disabled.
- * For firmware to re-enable User submission, two conditions are required to
- * be met.
- * 1. GLB_IDLE_EVENT acknowledgement
- * 2. CSI kernel DB ring
- *
- * If GLB_IDLE_EVENT is acknowledged and FW notices the previously rung CS kernel
- * DB, then it would re-enable the User submission and @sc_rails_off_worker might
- * end up turning off the SC rails.
- */
-static void wait_csg_db_ack(struct kbase_device *kbdev, int csg_nr)
-{
-#define WAIT_TIMEOUT 10 /* 1ms timeout */
-#define DELAY_TIME_IN_US 100
- struct kbase_csf_cmd_stream_group_info *const ginfo =
- &kbdev->csf.global_iface.groups[csg_nr];
- const int max_iterations = WAIT_TIMEOUT;
- int loop;
-
- for (loop = 0; loop < max_iterations; loop++) {
- if (kbase_csf_firmware_csg_input_read(ginfo, CSG_DB_REQ) ==
- kbase_csf_firmware_csg_output(ginfo, CSG_DB_ACK))
- break;
-
- udelay(DELAY_TIME_IN_US);
- }
-
- if (loop == max_iterations) {
- dev_err(kbdev->dev,
- "Timeout for csg %d CSG_DB_REQ %x != CSG_DB_ACK %x",
- csg_nr,
- kbase_csf_firmware_csg_input_read(ginfo, CSG_DB_REQ),
- kbase_csf_firmware_csg_output(ginfo, CSG_DB_ACK));
- }
-}
-
-/**
- * recheck_gpu_idleness - Recheck the idleness of the GPU before turning off
- * the SC power rails.
- *
- * @kbdev: Pointer to the device.
- *
- * This function is called on the GPU idle notification to recheck the idleness
- * of GPU before turning off the SC power rails. The reevaluation of idleness
- * is done by sending CSG status update requests. An additional check is done
- * for the CSGs that are reported as idle that whether the associated queues
- * are empty or blocked.
- *
- * Return: true if the GPU was reevaluated as idle.
- */
-static bool recheck_gpu_idleness(struct kbase_device *kbdev)
-{
- struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
- DECLARE_BITMAP(csg_bitmap, MAX_SUPPORTED_CSGS) = { 0 };
- long wt = kbase_csf_timeout_in_jiffies(kbdev->csf.fw_timeout_ms);
- u32 num_groups = kbdev->csf.global_iface.group_num;
- unsigned long flags, i;
-
- lockdep_assert_held(&scheduler->lock);
-
- spin_lock_irqsave(&scheduler->interrupt_lock, flags);
- for_each_set_bit(i, scheduler->csg_slots_idle_mask, num_groups) {
- struct kbase_csf_cmd_stream_group_info *const ginfo =
- &kbdev->csf.global_iface.groups[i];
- u32 csg_req = kbase_csf_firmware_csg_output(ginfo, CSG_ACK);
-
- csg_req ^= CSG_REQ_STATUS_UPDATE_MASK;
- kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ, csg_req,
- CSG_REQ_STATUS_UPDATE_MASK);
- set_bit(i, csg_bitmap);
- wait_csg_db_ack(kbdev, i);
- }
- kbase_csf_ring_csg_slots_doorbell(kbdev, csg_bitmap[0]);
- spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
-
- if (wait_csg_slots_handshake_ack(kbdev,
- CSG_REQ_STATUS_UPDATE_MASK, csg_bitmap, wt)) {
- dev_warn(
- kbdev->dev,
- "[%llu] Timeout (%d ms) on STATUS_UPDATE, treat GPU as not idle: slot mask=0x%lx",
- kbase_backend_get_cycle_cnt(kbdev),
- kbdev->csf.fw_timeout_ms,
- csg_bitmap[0]);
- return false;
- }
-
- KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_SLOT_IDLE_SET, NULL,
- scheduler->csg_slots_idle_mask[0]);
-
- ack_gpu_idle_event(kbdev);
- for_each_set_bit(i, scheduler->csg_slots_idle_mask, num_groups) {
- struct kbase_csf_cmd_stream_group_info *const ginfo =
- &kbdev->csf.global_iface.groups[i];
- struct kbase_csf_csg_slot *csg_slot = &scheduler->csg_slots[i];
- struct kbase_queue_group *group = csg_slot->resident_group;
- bool group_idle = true;
- int j;
-
- if (!group_on_slot_is_idle(kbdev, i))
- group_idle = false;
-
- for (j = 0; j < ginfo->stream_num; j++) {
- struct kbase_queue *const queue =
- group->bound_queues[j];
- u32 *output_addr;
-
- if (!queue || !queue->enabled)
- continue;
-
- output_addr = (u32 *)(queue->user_io_addr + PAGE_SIZE);
-
- if (output_addr[CS_ACTIVE / sizeof(u32)]) {
- dev_warn(
- kbdev->dev,
- "queue %d bound to group %d on slot %d active unexpectedly",
- queue->csi_index, queue->group->handle,
- queue->group->csg_nr);
- group_idle = false;
- }
-
- if (group_idle) {
- if (!save_slot_cs(ginfo, queue) &&
- !confirm_cmd_buf_empty(queue))
- group_idle = false;
- }
-
- if (!group_idle) {
- spin_lock_irqsave(&scheduler->interrupt_lock, flags);
- kbase_csf_ring_cs_kernel_doorbell(kbdev,
- queue->csi_index, group->csg_nr, true);
- spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
- KBASE_KTRACE_ADD_CSF_GRP(kbdev, SC_RAIL_RECHECK_NOT_IDLE, group, i);
- return false;
- }
- }
- }
- KBASE_KTRACE_ADD_CSF_GRP(kbdev, SC_RAIL_RECHECK_IDLE, NULL, (u64)scheduler->csg_slots_idle_mask);
- return true;
-}
-
-/**
- * can_turn_off_sc_rails - Check if the conditions are met to turn off the
- * SC power rails.
- *
- * @kbdev: Pointer to the device.
- *
- * This function checks both the on-slots and off-slots groups idle status and
- * if firmware is managing the cores. If the groups are not idle or Host is
- * managing the cores then the rails need to be kept on.
- * Additionally, we must check that the Idle event has not already been acknowledged
- * as that would indicate that the idle worker has run and potentially re-enabled
- * user-submission.
- *
- * Return: true if the SC power rails can be turned off.
- */
-static bool can_turn_off_sc_rails(struct kbase_device *kbdev)
-{
- struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
- bool turn_off_sc_rails;
- bool idle_event_pending;
- bool all_csg_idle;
- bool non_idle_offslot;
- unsigned long flags;
-
- lockdep_assert_held(&scheduler->lock);
-
- if (scheduler->state == SCHED_SUSPENDED)
- return false;
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- spin_lock(&scheduler->interrupt_lock);
- /* Ensure the SC power off sequence is complete before powering off the rail.
- * If shader rail is turned off during job, APM generates fatal error and GPU firmware
- * will generate error interrupt and try to reset.
- * Note that this will avert the case when a power off is not complete, but it is not
- * designed to handle a situation where a power on races with this code. That situation
- * should be prevented by trapping new work through the kernel.
- */
- if (!kbdev->pm.backend.sc_pwroff_safe) {
- trace_clock_set_rate("rail_off_aborted.", 1, raw_smp_processor_id());
- dev_info(kbdev->dev, "SC Rail off aborted, power sequence incomplete");
- }
-
- idle_event_pending = gpu_idle_event_is_pending(kbdev);
- all_csg_idle = kbase_csf_scheduler_all_csgs_idle(kbdev);
- non_idle_offslot = !atomic_read(&scheduler->non_idle_offslot_grps);
- turn_off_sc_rails = kbdev->pm.backend.sc_pwroff_safe &&
- idle_event_pending &&
- all_csg_idle &&
- non_idle_offslot &&
- !kbase_pm_no_mcu_core_pwroff(kbdev) &&
- !scheduler->sc_power_rails_off;
- KBASE_KTRACE_ADD_CSF_GRP(kbdev, SC_RAIL_CAN_TURN_OFF, NULL,
- kbdev->pm.backend.sc_pwroff_safe |
- idle_event_pending << 1 |
- all_csg_idle << 2 |
- non_idle_offslot << 3 |
- !kbase_pm_no_mcu_core_pwroff(kbdev) << 4 |
- !scheduler->sc_power_rails_off << 5);
-
- spin_unlock(&scheduler->interrupt_lock);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- return turn_off_sc_rails;
-}
-
-static void sc_rails_off_worker(struct work_struct *work)
-{
- struct kbase_device *kbdev = container_of(
- work, struct kbase_device, csf.scheduler.sc_rails_off_work);
- struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
-
- KBASE_KTRACE_ADD(kbdev, SCHEDULER_ENTER_SC_RAIL, NULL,
- kbase_csf_ktrace_gpu_cycle_cnt(kbdev));
- if (kbase_reset_gpu_try_prevent(kbdev)) {
- dev_warn(kbdev->dev, "Skip SC rails off for failing to prevent gpu reset");
- return;
- }
-
- rt_mutex_lock(&scheduler->lock);
- /* All the previously sent CSG/CSI level requests are expected to have
- * completed at this point.
- */
-
- if (can_turn_off_sc_rails(kbdev)) {
- if (recheck_gpu_idleness(kbdev)) {
- /* The GPU idle work, enqueued after previous idle
- * notification, could already be pending if GPU became
- * active momentarily after the previous idle notification
- * and all CSGs were reported as idle.
- */
- if (!scheduler->gpu_idle_work_pending)
- WARN_ON(scheduler->sc_power_rails_off);
- turn_off_sc_power_rails(kbdev);
- enqueue_gpu_idle_work(scheduler,
- kbdev->csf.gpu_idle_hysteresis_ms);
- }
- } else {
- ack_gpu_idle_event(kbdev);
- }
-
- rt_mutex_unlock(&scheduler->lock);
- kbase_reset_gpu_allow(kbdev);
- KBASE_KTRACE_ADD(kbdev, SCHEDULER_EXIT_SC_RAIL, NULL,
- kbase_csf_ktrace_gpu_cycle_cnt(kbdev));
-}
-#endif
-
static int scheduler_prepare(struct kbase_device *kbdev)
{
struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
@@ -5975,20 +5416,11 @@ static void schedule_actions(struct kbase_device *kbdev, bool is_tick)
kbase_reset_gpu_assert_prevented(kbdev);
lockdep_assert_held(&scheduler->lock);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (scheduler->gpu_idle_work_pending)
- return;
-#endif
-
if (kbase_csf_scheduler_wait_mcu_active(kbdev)) {
dev_err(kbdev->dev, "Wait for MCU power on failed on scheduling tick/tock");
return;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- turn_on_sc_power_rails(kbdev);
-#endif
-
spin_lock_irqsave(&scheduler->interrupt_lock, flags);
skip_idle_slots_update = kbase_csf_scheduler_protected_mode_in_use(kbdev);
skip_scheduling_actions = !skip_idle_slots_update && kbdev->protected_mode;
@@ -6028,7 +5460,7 @@ redo_local_tock:
*/
if (unlikely(!scheduler->ngrp_to_schedule && scheduler->total_runnable_grps)) {
dev_dbg(kbdev->dev, "No groups to schedule in the tick");
- enqueue_gpu_idle_work(scheduler, 0);
+ enqueue_gpu_idle_work(scheduler);
return;
}
spin_lock_irqsave(&scheduler->interrupt_lock, flags);
@@ -6194,7 +5626,7 @@ static void schedule_on_tock(struct kbase_device *kbdev)
scheduler->state = SCHED_INACTIVE;
KBASE_KTRACE_ADD(kbdev, SCHED_INACTIVE, NULL, scheduler->state);
if (!scheduler->total_runnable_grps)
- enqueue_gpu_idle_work(scheduler, 0);
+ enqueue_gpu_idle_work(scheduler);
rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
@@ -6242,7 +5674,7 @@ static void schedule_on_tick(struct kbase_device *kbdev)
dev_dbg(kbdev->dev, "scheduling for next tick, num_runnable_groups:%u\n",
scheduler->total_runnable_grps);
} else if (!scheduler->total_runnable_grps) {
- enqueue_gpu_idle_work(scheduler, 0);
+ enqueue_gpu_idle_work(scheduler);
}
scheduler->state = SCHED_INACTIVE;
@@ -6431,11 +5863,7 @@ static void scheduler_inner_reset(struct kbase_device *kbdev)
WARN_ON(kbase_csf_scheduler_get_nr_active_csgs(kbdev));
/* Cancel any potential queued delayed work(s) */
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- cancel_delayed_work_sync(&scheduler->gpu_idle_work);
-#else
cancel_work_sync(&kbdev->csf.scheduler.gpu_idle_work);
-#endif
cancel_tick_work(scheduler);
cancel_tock_work(scheduler);
cancel_delayed_work_sync(&scheduler->ping_work);
@@ -6814,13 +6242,6 @@ scheduler_get_protm_enter_async_group(struct kbase_device *const kbdev,
input_grp = NULL;
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (input_grp && kbdev->csf.scheduler.sc_power_rails_off) {
- dev_warn(kbdev->dev, "SC power rails unexpectedly off in async protm enter");
- return NULL;
- }
-#endif
-
return input_grp;
}
@@ -6909,10 +6330,6 @@ static bool check_sync_update_for_on_slot_group(struct kbase_queue_group *group)
if (!evaluate_sync_update(queue))
continue;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- queue->status_wait = 0;
-#endif
-
/* Update csg_slots_idle_mask and group's run_state */
if (group->run_state != KBASE_CSF_GROUP_RUNNABLE) {
/* Only clear the group's idle flag if it has been dealt
@@ -6936,23 +6353,6 @@ static bool check_sync_update_for_on_slot_group(struct kbase_queue_group *group)
KBASE_KTRACE_ADD_CSF_GRP(kbdev, GROUP_SYNC_UPDATE_DONE, group, 0u);
sync_update_done = true;
-
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* As the queue of an on-slot group has become unblocked,
- * the power rails can be turned on and the execution can
- * be resumed on HW.
- */
- if (kbdev->csf.scheduler.sc_power_rails_off) {
- cancel_gpu_idle_work(kbdev);
- turn_on_sc_power_rails(kbdev);
- spin_lock_irqsave(&scheduler->interrupt_lock,
- flags);
- kbase_csf_ring_cs_kernel_doorbell(kbdev,
- queue->csi_index, group->csg_nr, true);
- spin_unlock_irqrestore(&scheduler->interrupt_lock,
- flags);
- }
-#endif
}
}
@@ -7083,28 +6483,6 @@ static void check_sync_update_in_sleep_mode(struct kbase_device *kbdev)
}
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static void check_sync_update_after_sc_power_down(struct kbase_device *kbdev)
-{
- struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
- u32 const num_groups = kbdev->csf.global_iface.group_num;
- u32 csg_nr;
-
- lockdep_assert_held(&scheduler->lock);
-
- for (csg_nr = 0; csg_nr < num_groups; csg_nr++) {
- struct kbase_queue_group *const group =
- kbdev->csf.scheduler.csg_slots[csg_nr].resident_group;
-
- if (!group)
- continue;
-
- if (check_sync_update_for_on_slot_group(group))
- return;
- }
-}
-#endif
-
/**
* check_group_sync_update_worker() - Check the sync wait condition for all the
* blocked queue groups
@@ -7151,14 +6529,6 @@ static void check_group_sync_update_worker(struct kthread_work *work)
*/
update_idle_suspended_group_state(group);
KBASE_KTRACE_ADD_CSF_GRP(kbdev, GROUP_SYNC_UPDATE_DONE, group, 0u);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- cancel_gpu_idle_work(kbdev);
- /* As an off-slot group has become runnable,
- * the rails will be turned on and the CS
- * kernel doorbell will be rung from the
- * scheduling tick.
- */
-#endif
}
}
} else {
@@ -7176,15 +6546,6 @@ static void check_group_sync_update_worker(struct kthread_work *work)
if (!sync_updated && (scheduler->state == SCHED_SLEEPING))
check_sync_update_in_sleep_mode(kbdev);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /* Check if the sync update happened for a blocked on-slot group after
- * the shader core power rails were turned off, and reactivate the GPU
- * if the wait condition is met for the blocked group.
- */
- if (!sync_updated && scheduler->sc_power_rails_off)
- check_sync_update_after_sc_power_down(kbdev);
-#endif
-
KBASE_KTRACE_ADD(kbdev, SCHEDULER_GROUP_SYNC_UPDATE_WORKER_END, kctx, 0u);
rt_mutex_unlock(&scheduler->lock);
@@ -7415,15 +6776,7 @@ int kbase_csf_scheduler_early_init(struct kbase_device *kbdev)
scheduler->csg_scheduling_period_ms = CSF_SCHEDULER_TIME_TICK_MS;
scheduler_doorbell_init(kbdev);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- INIT_DEFERRABLE_WORK(&scheduler->gpu_idle_work, gpu_idle_worker);
- INIT_WORK(&scheduler->sc_rails_off_work, sc_rails_off_worker);
- scheduler->sc_power_rails_off = true;
- scheduler->gpu_idle_work_pending = false;
- scheduler->gpu_idle_fw_timer_enabled = false;
-#else
INIT_WORK(&scheduler->gpu_idle_work, gpu_idle_worker);
-#endif
scheduler->fast_gpu_idle_handling = false;
atomic_set(&scheduler->gpu_no_longer_idle, false);
atomic_set(&scheduler->non_idle_offslot_grps, 0);
@@ -7454,12 +6807,7 @@ void kbase_csf_scheduler_term(struct kbase_device *kbdev)
* to be active at the time of Driver unload.
*/
WARN_ON(kbase_csf_scheduler_get_nr_active_csgs(kbdev));
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- flush_work(&kbdev->csf.scheduler.sc_rails_off_work);
- flush_delayed_work(&kbdev->csf.scheduler.gpu_idle_work);
-#else
flush_work(&kbdev->csf.scheduler.gpu_idle_work);
-#endif
rt_mutex_lock(&kbdev->csf.scheduler.lock);
if (kbdev->csf.scheduler.state != SCHED_SUSPENDED) {
@@ -7518,7 +6866,7 @@ static void scheduler_enable_tick_timer_nolock(struct kbase_device *kbdev)
kbase_csf_scheduler_invoke_tick(kbdev);
dev_dbg(kbdev->dev, "Re-enabling the scheduler timer\n");
} else if (scheduler->state != SCHED_SUSPENDED) {
- enqueue_gpu_idle_work(scheduler, 0);
+ enqueue_gpu_idle_work(scheduler);
}
}
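
The call-site changes above drop the hysteresis-delay argument from enqueue_gpu_idle_work(). A minimal sketch of the simplified helper, assuming the scheduler's dedicated idle workqueue (idle_wq) and the gpu_no_longer_idle flag seen elsewhere in this file; the actual function body sits outside these hunks:

static void enqueue_gpu_idle_work(struct kbase_csf_scheduler *scheduler)
{
	/* No IFPO hysteresis any more: queue the idle handling immediately. */
	atomic_set(&scheduler->gpu_no_longer_idle, false);
	queue_work(scheduler->idle_wq, &scheduler->gpu_idle_work);
}
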
diff --git a/mali_kbase/csf/mali_kbase_csf_scheduler.h b/mali_kbase/csf/mali_kbase_csf_scheduler.h
index 40f7f7b..b299235 100644
--- a/mali_kbase/csf/mali_kbase_csf_scheduler.h
+++ b/mali_kbase/csf/mali_kbase_csf_scheduler.h
@@ -653,17 +653,4 @@ void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev);
void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev);
#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-/**
- * turn_on_sc_power_rails - Turn on the shader core power rails.
- *
- * @kbdev: Pointer to the device.
- *
- * This function is called to synchronously turn on the shader core power
- * rails before execution is resumed on the cores.
- *
- * The scheduler lock must be held when calling this function.
- */
-void turn_on_sc_power_rails(struct kbase_device *kbdev);
-#endif
#endif /* _KBASE_CSF_SCHEDULER_H_ */
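
Because the doc comment above requires the scheduler lock, callers of the removed helper followed the pattern below; this mirrors the ifpo sysfs handler removed later in this patch (same lock primitive as used there):

	mutex_lock(&kbdev->csf.scheduler.lock);
	turn_on_sc_power_rails(kbdev);	/* rails guaranteed on from here */
	mutex_unlock(&kbdev->csf.scheduler.lock);
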
diff --git a/mali_kbase/debug/backend/mali_kbase_debug_ktrace_codes_csf.h b/mali_kbase/debug/backend/mali_kbase_debug_ktrace_codes_csf.h
index b8999ea..81f173c 100644
--- a/mali_kbase/debug/backend/mali_kbase_debug_ktrace_codes_csf.h
+++ b/mali_kbase/debug/backend/mali_kbase_debug_ktrace_codes_csf.h
@@ -310,14 +310,6 @@ KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_EVICT_CTX_SLOTS_START),
* KCPU extra_info_val == Fence seqno.
*/
KBASE_KTRACE_CODE_MAKE_CODE(KCPU_FENCE_WAIT_END),
-
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_ENTER_SC_RAIL),
- KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_EXIT_SC_RAIL),
- KBASE_KTRACE_CODE_MAKE_CODE(SC_RAIL_RECHECK_IDLE),
- KBASE_KTRACE_CODE_MAKE_CODE(SC_RAIL_RECHECK_NOT_IDLE),
- KBASE_KTRACE_CODE_MAKE_CODE(SC_RAIL_CAN_TURN_OFF),
-#endif
#if 0 /* Dummy section to avoid breaking formatting */
};
#endif
diff --git a/mali_kbase/debug/backend/mali_kbase_debug_linux_ktrace_csf.h b/mali_kbase/debug/backend/mali_kbase_debug_linux_ktrace_csf.h
index b77779a..0b0de23 100644
--- a/mali_kbase/debug/backend/mali_kbase_debug_linux_ktrace_csf.h
+++ b/mali_kbase/debug/backend/mali_kbase_debug_linux_ktrace_csf.h
@@ -65,10 +65,6 @@ DEFINE_MALI_ADD_EVENT(SCHEDULER_GPU_IDLE_WORKER_HANDLING_START);
DEFINE_MALI_ADD_EVENT(SCHEDULER_GPU_IDLE_WORKER_HANDLING_END);
DEFINE_MALI_ADD_EVENT(CSF_FIRMWARE_MCU_HALTED);
DEFINE_MALI_ADD_EVENT(CSF_FIRMWARE_MCU_SLEEP);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-DEFINE_MALI_ADD_EVENT(SCHEDULER_ENTER_SC_RAIL);
-DEFINE_MALI_ADD_EVENT(SCHEDULER_EXIT_SC_RAIL);
-#endif
DEFINE_MALI_ADD_EVENT(SCHED_BUSY);
DEFINE_MALI_ADD_EVENT(SCHED_INACTIVE);
DEFINE_MALI_ADD_EVENT(SCHED_SUSPENDED);
@@ -173,11 +169,6 @@ DEFINE_MALI_CSF_GRP_EVENT(SCHEDULER_NONIDLE_OFFSLOT_GRP_DEC);
DEFINE_MALI_CSF_GRP_EVENT(SCHEDULER_HANDLE_IDLE_SLOTS);
DEFINE_MALI_CSF_GRP_EVENT(PROTM_EVENT_WORKER_START);
DEFINE_MALI_CSF_GRP_EVENT(PROTM_EVENT_WORKER_END);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-DEFINE_MALI_CSF_GRP_EVENT(SC_RAIL_RECHECK_IDLE);
-DEFINE_MALI_CSF_GRP_EVENT(SC_RAIL_RECHECK_NOT_IDLE);
-DEFINE_MALI_CSF_GRP_EVENT(SC_RAIL_CAN_TURN_OFF);
-#endif
DEFINE_MALI_CSF_GRP_EVENT(CSF_GROUP_INACTIVE);
DEFINE_MALI_CSF_GRP_EVENT(CSF_GROUP_RUNNABLE);
DEFINE_MALI_CSF_GRP_EVENT(CSF_GROUP_IDLE);
diff --git a/mali_kbase/debug/mali_kbase_debug_ktrace_codes.h b/mali_kbase/debug/mali_kbase_debug_ktrace_codes.h
index 6e81fb8..991f70f 100644
--- a/mali_kbase/debug/mali_kbase_debug_ktrace_codes.h
+++ b/mali_kbase/debug/mali_kbase_debug_ktrace_codes.h
@@ -156,10 +156,6 @@ KBASE_KTRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
KBASE_KTRACE_CODE_MAKE_CODE(ARB_GPU_LOST), KBASE_KTRACE_CODE_MAKE_CODE(ARB_VM_STATE),
KBASE_KTRACE_CODE_MAKE_CODE(ARB_VM_EVT),
#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- KBASE_KTRACE_CODE_MAKE_CODE(PM_RAIL_ON),
- KBASE_KTRACE_CODE_MAKE_CODE(PM_RAIL_OFF),
-#endif
#if MALI_USE_CSF
#include "debug/backend/mali_kbase_debug_ktrace_codes_csf.h"
diff --git a/mali_kbase/debug/mali_kbase_debug_linux_ktrace.h b/mali_kbase/debug/mali_kbase_debug_linux_ktrace.h
index 8128eae..1825662 100644
--- a/mali_kbase/debug/mali_kbase_debug_linux_ktrace.h
+++ b/mali_kbase/debug/mali_kbase_debug_linux_ktrace.h
@@ -102,10 +102,6 @@ DEFINE_MALI_ADD_EVENT(ARB_VM_STATE);
DEFINE_MALI_ADD_EVENT(ARB_VM_EVT);
#endif
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-DEFINE_MALI_ADD_EVENT(PM_RAIL_ON);
-DEFINE_MALI_ADD_EVENT(PM_RAIL_OFF);
-#endif
#if MALI_USE_CSF
#include "backend/mali_kbase_debug_linux_ktrace_csf.h"
diff --git a/mali_kbase/device/backend/mali_kbase_device_hw_csf.c b/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
index c5dc3c6..fc13359 100644
--- a/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
+++ b/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
@@ -75,35 +75,6 @@ static void kbase_gpu_fault_interrupt(struct kbase_device *kbdev)
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-/* When the GLB_PWROFF_TIMER expires, FW writes the SHADER_PWROFF register and the
- * following sequence occurs:
- * - SHADER_PWRTRANS goes high
- * - SHADER_READY goes low
- * - Iterator is told not to send any more work to the core
- * - Wait for the core to drain
- * - SHADER_PWRACTIVE goes low
- * - Do an IPA sample
- * - Flush the core
- * - Apply functional isolation
- * - Turn the clock off
- * - Put the core in reset
- * - Apply electrical isolation
- * - Power off the core
- * - SHADER_PWRTRANS goes low
- *
- * It's therefore safe to turn off the SC rail when:
- * - SHADER_READY == 0, meaning the SCs last transitioned to OFF
- * - SHADER_PWRTRANS == 0, meaning the SCs have finished transitioning
- */
-static bool safe_to_turn_off_sc_rail(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
- return (kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(SHADER_READY)) ||
- kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(SHADER_PWRTRANS))) == 0;
-}
-#endif /* CONFIG_MALI_HOST_CONTROLS_SC_RAILS */
-
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
{
u32 power_changed_mask = (POWER_CHANGED_ALL | MCU_STATUS_GPU_IRQ);
@@ -188,15 +159,6 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
if (val & CLEAN_CACHES_COMPLETED)
kbase_clean_caches_done(kbdev);
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- if (val & POWER_CHANGED_ALL) {
- unsigned long flags;
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbdev->pm.backend.sc_pwroff_safe = safe_to_turn_off_sc_rail(kbdev);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
-#endif
-
if (val & power_changed_mask) {
kbase_pm_power_changed(kbdev);
} else if (val & CLEAN_CACHES_COMPLETED) {
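
The removed predicate above folds both register reads into one expression; (a || b) == 0 holds only when both reads are zero. An equivalent, more explicit form of the same check, illustrative only:

static bool safe_to_turn_off_sc_rail_explicit(struct kbase_device *kbdev)
{
	u64 ready, pwrtrans;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	/* Safe only once no shader core is READY and none is still in a
	 * power transition.
	 */
	ready = kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(SHADER_READY));
	pwrtrans = kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(SHADER_PWRTRANS));

	return ready == 0 && pwrtrans == 0;
}
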
diff --git a/mali_kbase/mali_kbase_config.h b/mali_kbase/mali_kbase_config.h
index 9326e3b..c7a6b32 100644
--- a/mali_kbase/mali_kbase_config.h
+++ b/mali_kbase/mali_kbase_config.h
@@ -368,24 +368,6 @@ struct kbase_pm_callback_conf {
* this feature.
*/
void (*power_runtime_gpu_active_callback)(struct kbase_device *kbdev);
-
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- /*
- * This callback will be invoked by Kbase when the GPU becomes active,
- * to turn on the shader core power rails.
- * This callback is invoked from process context and the power rails
- * must be turned on before the callback completes.
- */
- void (*power_on_sc_rails_callback)(struct kbase_device *kbdev);
-
- /*
- * This callback will be invoked by Kbase when the GPU becomes idle,
- * to turn off the shader core power rails.
- * This callback is invoked from process context and the power rails
- * must be turned off before the callback completes.
- */
- void (*power_off_sc_rails_callback)(struct kbase_device *kbdev);
-#endif
};
/* struct kbase_gpu_clk_notifier_data - Data for clock rate change notifier.
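
A platform implementing the two removed hooks wired them into its kbase_pm_callback_conf, as the devicetree and meson backends below show with debug stubs. A minimal sketch, assuming the pre-removal struct fields (the example_* names are hypothetical):

#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
static void example_sc_rails_on(struct kbase_device *kbdev)
{
	/* The rails must actually be on before this callback returns. */
	dev_dbg(kbdev->dev, "SC rails are on");
}

static void example_sc_rails_off(struct kbase_device *kbdev)
{
	/* The rails must actually be off before this callback returns. */
	dev_dbg(kbdev->dev, "SC rails are off");
}
#endif

and, in the platform's callback table:

#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
	.power_on_sc_rails_callback = example_sc_rails_on,
	.power_off_sc_rails_callback = example_sc_rails_off,
#endif
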
diff --git a/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c b/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c
index 59ad094..7c92505 100644
--- a/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c
+++ b/mali_kbase/platform/devicetree/mali_kbase_runtime_pm.c
@@ -267,17 +267,6 @@ static void pm_callback_suspend(struct kbase_device *kbdev)
pm_callback_runtime_off(kbdev);
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static void pm_callback_sc_rails_on(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "SC rails are on");
-}
-
-static void pm_callback_sc_rails_off(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "SC rails are off");
-}
-#endif
struct kbase_pm_callback_conf pm_callbacks = {
.power_on_callback = pm_callback_power_on,
@@ -303,9 +292,4 @@ struct kbase_pm_callback_conf pm_callbacks = {
.power_runtime_gpu_idle_callback = NULL,
.power_runtime_gpu_active_callback = NULL,
#endif
-
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- .power_on_sc_rails_callback = pm_callback_sc_rails_on,
- .power_off_sc_rails_callback = pm_callback_sc_rails_off,
-#endif
};
diff --git a/mali_kbase/platform/meson/mali_kbase_runtime_pm.c b/mali_kbase/platform/meson/mali_kbase_runtime_pm.c
index d3df393..8cb9354 100644
--- a/mali_kbase/platform/meson/mali_kbase_runtime_pm.c
+++ b/mali_kbase/platform/meson/mali_kbase_runtime_pm.c
@@ -245,18 +245,6 @@ static void pm_callback_suspend(struct kbase_device *kbdev)
pm_callback_runtime_off(kbdev);
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-static void pm_callback_sc_rails_on(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "SC rails are on");
-}
-
-static void pm_callback_sc_rails_off(struct kbase_device *kbdev)
-{
- dev_dbg(kbdev->dev, "SC rails are off");
-}
-#endif
-
struct kbase_pm_callback_conf pm_callbacks = {
.power_on_callback = pm_callback_power_on,
.power_off_callback = pm_callback_power_off,
@@ -282,9 +270,4 @@ struct kbase_pm_callback_conf pm_callbacks = {
.power_runtime_gpu_idle_callback = NULL,
.power_runtime_gpu_active_callback = NULL,
#endif
-
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- .power_on_sc_rails_callback = pm_callback_sc_rails_on,
- .power_off_sc_rails_callback = pm_callback_sc_rails_off,
-#endif
};
diff --git a/mali_kbase/platform/pixel/mali_kbase_config_platform.h b/mali_kbase/platform/pixel/mali_kbase_config_platform.h
index 57cec12..ef73c65 100644
--- a/mali_kbase/platform/pixel/mali_kbase_config_platform.h
+++ b/mali_kbase/platform/pixel/mali_kbase_config_platform.h
@@ -334,9 +334,6 @@ struct pixel_context {
struct bcl_device *bcl_dev;
#endif
struct pixel_rail_state_log *rail_state_log;
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- bool ifpo_enabled;
-#endif
} pm;
#ifdef CONFIG_MALI_PIXEL_GPU_SECURE_RENDERING
diff --git a/mali_kbase/platform/pixel/pixel_gpu.c b/mali_kbase/platform/pixel/pixel_gpu.c
index 0435f4a..7ecc156 100644
--- a/mali_kbase/platform/pixel/pixel_gpu.c
+++ b/mali_kbase/platform/pixel/pixel_gpu.c
@@ -43,10 +43,6 @@
*/
#define GPU_SMC_TZPC_OK 0
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-#define HOST_CONTROLS_SC_RAILS_CFG_ENTRY_NAME "Host controls SC rails"
-#endif
-
/**
* pixel_gpu_secure_mode_enable() - Enables secure mode for the GPU
*
@@ -130,42 +126,10 @@ struct protected_mode_ops pixel_protected_ops = {
#endif /* CONFIG_MALI_PIXEL_GPU_SECURE_RENDERING */
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-/**
- * gpu_pixel_enable_host_ctrl_sc_rails() - Enable the config in FW to support
- * host-based control of SC power rails
- *
- * @kbdev: Kbase device structure
- *
- * Look for the config entry that enables support in FW for the host-based
- * control of shader core power rails and set it before the initial boot
- * or reload of firmware.
- *
- * Return: 0 if successful, negative error code on failure
- */
-static int gpu_pixel_enable_host_ctrl_sc_rails(struct kbase_device *kbdev)
-{
- u32 addr;
- int ec = kbase_csf_firmware_cfg_find_config_address(
- kbdev, HOST_CONTROLS_SC_RAILS_CFG_ENTRY_NAME, &addr);
-
- if (!ec) {
- kbase_csf_update_firmware_memory(kbdev, addr, 1);
- }
-
- return ec;
-}
-#endif
-
static int gpu_fw_cfg_init(struct kbase_device *kbdev) {
int ec = 0;
#if MALI_USE_CSF
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- ec = gpu_pixel_enable_host_ctrl_sc_rails(kbdev);
- if (ec)
- dev_warn(kbdev->dev, "pixel: failed to enable SC rail host-control");
-#endif
if (gpu_sscd_fw_log_init(kbdev, 0)) {
dev_warn(kbdev->dev, "pixel: failed to enable FW log");
}
diff --git a/mali_kbase/platform/pixel/pixel_gpu_power.c b/mali_kbase/platform/pixel/pixel_gpu_power.c
index 0853176..bc9ce92 100644
--- a/mali_kbase/platform/pixel/pixel_gpu_power.c
+++ b/mali_kbase/platform/pixel/pixel_gpu_power.c
@@ -510,92 +510,6 @@ static void gpu_pm_callback_power_runtime_term(struct kbase_device *kbdev)
#endif /* IS_ENABLED(KBASE_PM_RUNTIME) */
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
-/**
- * gpu_pm_power_on_cores() - Powers on the GPU shader cores for
- * CONFIG_MALI_HOST_CONTROLS_SC_RAILS integrations.
- *
- * @kbdev: The &struct kbase_device for the GPU.
- *
- * Powers on the CORES domain for CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- * integrations. Afterwards the shaders are powered and may be used by the GPU.
- *
- * Context: Process context. Takes and releases PM lock.
- */
-static void gpu_pm_power_on_cores(struct kbase_device *kbdev) {
- struct pixel_context *pc = kbdev->platform_context;
-
- gpu_pm_rail_state_start_transition_lock(pc);
-
- if (pc->pm.state == GPU_POWER_LEVEL_GLOBAL && pc->pm.ifpo_enabled) {
- pm_runtime_get_sync(pc->pm.domain_devs[GPU_PM_DOMAIN_CORES]);
- pc->pm.state = GPU_POWER_LEVEL_STACKS;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- gpu_dvfs_event_power_on(kbdev);
-#endif
- }
-
- gpu_pm_rail_state_end_transition_unlock(pc);
-}
-
-/**
- * gpu_pm_power_off_cores() - Powers off the GPU shader cores for
- * CONFIG_MALI_HOST_CONTROLS_SC_RAILS integrations.
- *
- * @kbdev: The &struct kbase_device for the GPU.
- *
- * Powers off the CORES domain for CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- * integrations. Afterwards the shaders are not powered and may not be used by the GPU.
- *
- * Context: Process context. Takes and releases PM lock.
- */
-static void gpu_pm_power_off_cores(struct kbase_device *kbdev) {
- struct pixel_context *pc = kbdev->platform_context;
-
- gpu_pm_rail_state_start_transition_lock(pc);
-
- if (pc->pm.state == GPU_POWER_LEVEL_STACKS && pc->pm.ifpo_enabled) {
- pm_runtime_put_sync(pc->pm.domain_devs[GPU_PM_DOMAIN_CORES]);
- pc->pm.state = GPU_POWER_LEVEL_GLOBAL;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- gpu_dvfs_event_power_off(kbdev);
-#endif
- }
-
- gpu_pm_rail_state_end_transition_unlock(pc);
-}
-
-/**
- * gpu_pm_callback_power_sc_rails_on() - Called by GPU when shaders are needed.
- *
- * @kbdev: The device that needs its shaders powered on.
- *
- * This callback is made when @kbdev needs its shader cores powered on, for
- * integrations using CONFIG_MALI_HOST_CONTROLS_SC_RAILS.
- */
-static void gpu_pm_callback_power_sc_rails_on(struct kbase_device *kbdev) {
- dev_dbg(kbdev->dev, "%s\n", __func__);
-
- gpu_pm_power_on_cores(kbdev);
-}
-
-/**
- * gpu_pm_callback_power_sc_rails_off() - Called by GPU when shaders are idle.
- *
- * @kbdev: The device that needs its shaders powered off.
- *
- * This callback is made when @kbdev could have its shader cores powered off,
- * for integrations using CONFIG_MALI_HOST_CONTROLS_SC_RAILS.
- */
-static void gpu_pm_callback_power_sc_rails_off(struct kbase_device *kbdev) {
- dev_dbg(kbdev->dev, "%s\n", __func__);
-
- gpu_pm_power_off_cores(kbdev);
-}
-#endif /* CONFIG_MALI_HOST_CONTROLS_SC_RAILS */
-
static void gpu_pm_hw_reset(struct kbase_device *kbdev)
{
struct pixel_context *pc = kbdev->platform_context;
@@ -661,10 +575,6 @@ struct kbase_pm_callback_conf pm_callbacks = {
#endif /* KBASE_PM_RUNTIME */
.soft_reset_callback = NULL,
.hardware_reset_callback = gpu_pm_hw_reset,
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- .power_on_sc_rails_callback = gpu_pm_callback_power_sc_rails_on,
- .power_off_sc_rails_callback = gpu_pm_callback_power_sc_rails_off,
-#endif /* CONFIG_MALI_HOST_CONTROLS_SC_RAILS */
};
/**
@@ -745,10 +655,6 @@ int gpu_pm_init(struct kbase_device *kbdev)
}
}
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- pc->pm.ifpo_enabled = true;
-#endif
-
if (of_property_read_u32(np, "gpu_pm_autosuspend_delay", &pc->pm.autosuspend_delay)) {
pc->pm.use_autosuspend = false;
pc->pm.autosuspend_delay = 0;
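
The two removed helpers above are mirror images: IFPO toggles the CORES power domain with runtime PM under the rail-state transition lock. A condensed restatement, illustrative only, using the names from the removed code (the DVFS power-on/off events of the originals are omitted for brevity):

static void ifpo_set_cores(struct kbase_device *kbdev, bool on)
{
	struct pixel_context *pc = kbdev->platform_context;

	gpu_pm_rail_state_start_transition_lock(pc);

	if (on && pc->pm.state == GPU_POWER_LEVEL_GLOBAL && pc->pm.ifpo_enabled) {
		pm_runtime_get_sync(pc->pm.domain_devs[GPU_PM_DOMAIN_CORES]);
		pc->pm.state = GPU_POWER_LEVEL_STACKS;
	} else if (!on && pc->pm.state == GPU_POWER_LEVEL_STACKS &&
		   pc->pm.ifpo_enabled) {
		pm_runtime_put_sync(pc->pm.domain_devs[GPU_PM_DOMAIN_CORES]);
		pc->pm.state = GPU_POWER_LEVEL_GLOBAL;
	}

	gpu_pm_rail_state_end_transition_unlock(pc);
}
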
diff --git a/mali_kbase/platform/pixel/pixel_gpu_sysfs.c b/mali_kbase/platform/pixel/pixel_gpu_sysfs.c
index f6164f9..a1c0e94 100644
--- a/mali_kbase/platform/pixel/pixel_gpu_sysfs.c
+++ b/mali_kbase/platform/pixel/pixel_gpu_sysfs.c
@@ -695,58 +695,6 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
return ret;
}
-static ssize_t ifpo_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- struct kbase_device *kbdev = dev->driver_data;
- struct pixel_context *pc = kbdev->platform_context;
- ssize_t ret = 0;
-
- if (!pc)
- return -ENODEV;
-
- mutex_lock(&pc->pm.lock);
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", pc->pm.ifpo_enabled);
- mutex_unlock(&pc->pm.lock);
- return ret;
-#else
- return -ENOTSUPP;
-#endif
-}
-
-static ssize_t ifpo_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
-#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
- int ret;
- bool enabled;
- struct kbase_device *kbdev = dev->driver_data;
- struct pixel_context *pc = kbdev->platform_context;
- if (!pc)
- return -ENODEV;
-
- ret = strtobool(buf, &enabled);
- if (ret)
- return -EINVAL;
-
- mutex_lock(&kbdev->csf.scheduler.lock);
-
- if (!enabled) {
- turn_on_sc_power_rails(kbdev);
- }
-
- mutex_lock(&pc->pm.lock);
- pc->pm.ifpo_enabled = enabled;
- mutex_unlock(&pc->pm.lock);
- mutex_unlock(&kbdev->csf.scheduler.lock);
-
- return count;
-#else
- return -ENOTSUPP;
-#endif
-}
-
-
/* Define devfreq-like attributes */
DEVICE_ATTR_RO(available_frequencies);
DEVICE_ATTR_RO(cur_freq);
@@ -761,7 +709,6 @@ DEVICE_ATTR_RO(time_in_state);
DEVICE_ATTR_RO(trans_stat);
DEVICE_ATTR_RO(available_governors);
DEVICE_ATTR_RW(governor);
-DEVICE_ATTR_RW(ifpo);
/* Initialization code */
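
A side note on the removed ifpo_store(): strtobool() was a thin alias of kstrtobool() and has since been dropped from the kernel, so the equivalent parse in current kernels would be:

	bool enabled;

	/* kstrtobool() accepts 0/1, y/n, on/off; returns -EINVAL otherwise. */
	if (kstrtobool(buf, &enabled))
		return -EINVAL;
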
@@ -795,7 +742,6 @@ static struct {
{ "available_governors", &dev_attr_available_governors },
{ "governor", &dev_attr_governor },
{ "trigger_core_dump", &dev_attr_trigger_core_dump },
- { "ifpo", &dev_attr_ifpo }
};
/**