author    Qais Yousef <qyousef@google.com>    2023-06-08 18:09:21 +0000
committer TreeHugger Robot <treehugger-gerrit@google.com>    2023-06-09 18:45:32 +0000
commit    21e0dc08966fd943e9136fd248f7dc85e3d6d19d (patch)
tree      9b041b48750fc63104a04faff96c9012a0d81381
parent    851f3b441ebfcbf9c1b07010b501c904c096c15b (diff)
download  gs-android-gs-tangorpro-5.10-u-beta4.tar.gz
For some reason we still see regressions on use cases that rely on iowait
boost. Tried several things but couldn't get the lost perf back. So as a
last resort, rework the logic so that we don't block the updates, but rather
always force an additional freq update after applying the uclamp filters.
Not ideal, but it should retain the old behavior and ensure the necessary
freq updates are applied to impose uclamp hints - albeit we might have
already moved to the wrong frequency temporarily.

Bug: 281263677
Signed-off-by: Qais Yousef <qyousef@google.com>
Change-Id: Ib288c014ac652c1d482afda7ff60a2cae5600102
-rw-r--r--  drivers/soc/google/vh/kernel/sched/cpufreq_gov.c  29
-rw-r--r--  drivers/soc/google/vh/kernel/sched/fair.c         33
-rw-r--r--  drivers/soc/google/vh/kernel/sched/sched_priv.h     3
3 files changed, 9 insertions, 56 deletions
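In outline, the patch swaps the old BLOCK/RESUME handshake for a single
FORCE_UPDATE flag that is raised only after the uclamp filters have run.
Below is a minimal sketch of the new control flow, with simplified types and
locking elided; only the identifiers that also appear in the diff come from
the patch, the rest is illustrative:

#include <stdbool.h>

#define BIT(nr)				(1U << (nr))	/* as in <linux/bits.h> */
#define SCHED_PIXEL_FORCE_UPDATE	BIT(8)

struct sugov_policy_sketch {
	bool limits_changed;	/* schedutil re-evaluates the freq when set */
};

/* Governor side: fold a forced update into limits_changed and carry on. */
static void update_shared_sketch(struct sugov_policy_sketch *sg_policy,
				 unsigned int flags)
{
	sg_policy->limits_changed |= !!(flags & SCHED_PIXEL_FORCE_UPDATE);
	/* ...the normal util/frequency evaluation now always runs... */
}

/* Enqueue side: request the extra update only if a uclamp filter fired. */
static void enqueue_sketch(struct sugov_policy_sketch *sg_policy,
			   bool ignored_uclamp_min, bool ignored_uclamp_max)
{
	if (ignored_uclamp_min || ignored_uclamp_max)
		update_shared_sketch(sg_policy, SCHED_PIXEL_FORCE_UPDATE);
}

The trade-off named in the commit message follows directly: intermediate
updates are no longer suppressed, so a transiently wrong frequency can be
selected before the forced update corrects it.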
diff --git a/drivers/soc/google/vh/kernel/sched/cpufreq_gov.c b/drivers/soc/google/vh/kernel/sched/cpufreq_gov.c
index 000158b1b4a7..faab075183e8 100644
--- a/drivers/soc/google/vh/kernel/sched/cpufreq_gov.c
+++ b/drivers/soc/google/vh/kernel/sched/cpufreq_gov.c
@@ -67,9 +67,6 @@ struct sugov_policy {
 	cpumask_t		pmu_ignored_mask;
 	bool			under_pmu_throttle;
 	bool			relax_pmu_throttle;
-
-	int			block_updates;
-	bool			update_iowait;
 };
 
 struct sugov_cpu {
@@ -835,31 +832,7 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 	raw_spin_lock(&sg_policy->update_lock);
 
-	if (flags & SCHED_PIXEL_BLOCK_UPDATES) {
-		if (flags & SCHED_CPUFREQ_IOWAIT)
-			sg_policy->update_iowait = true;
-
-		sg_policy->block_updates++;
-		raw_spin_unlock(&sg_policy->update_lock);
-		return;
-	}
-
-	if (flags & SCHED_PIXEL_RESUME_UPDATES) {
-		sg_policy->block_updates--;
-
-		if (WARN_ON(sg_policy->block_updates < 0))
-			sg_policy->block_updates = 0;
-
-		if (sg_policy->update_iowait) {
-			flags |= SCHED_CPUFREQ_IOWAIT;
-			sg_policy->update_iowait = false;
-		}
-	}
-
-	if (sg_policy->block_updates) {
-		raw_spin_unlock(&sg_policy->update_lock);
-		return;
-	}
+	sg_policy->limits_changed |= flags & SCHED_PIXEL_FORCE_UPDATE;
 
 	util = sugov_get_util(sg_cpu);
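Folding the flag into limits_changed works because schedutil treats a set
limits_changed as a request to bypass the rate limit on its next invocation.
Paraphrased from upstream kernel/sched/cpufreq_schedutil.c around v5.10 (a
sketch, not this vendor governor's exact code):

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;	/* drop cached freq */
		return true;				/* skip rate limiting */
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}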
diff --git a/drivers/soc/google/vh/kernel/sched/fair.c b/drivers/soc/google/vh/kernel/sched/fair.c
index 9396bb979862..a132945ccda6 100644
--- a/drivers/soc/google/vh/kernel/sched/fair.c
+++ b/drivers/soc/google/vh/kernel/sched/fair.c
@@ -1989,26 +1989,7 @@ EXPORT_SYMBOL_GPL(vh_arch_set_freq_scale_pixel_mod);
 
 void rvh_set_iowait_pixel_mod(void *data, struct task_struct *p, int *should_iowait_boost)
 {
-	unsigned int flags = SCHED_PIXEL_BLOCK_UPDATES;
-
 	*should_iowait_boost = p->in_iowait && uclamp_boosted(p);
-
-	if (*should_iowait_boost)
-		flags |= SCHED_CPUFREQ_IOWAIT;
-
-	/*
-	 * Tell sched pixel to ignore cpufreq updates. this happens at
-	 * enqueue_task_fair() entry.
-	 *
-	 * We want to defer all request to defer frequency updates until uclamp
-	 * filter is applied.
-	 *
-	 * Note that enqueue_task_fair() could request cpufreq updates when
-	 * calling update_load_avg(). Since this vh is called before those
-	 * - this strategic block will ensure all subsequent requests are
-	 * ignored.
-	 */
-	cpufreq_update_util(task_rq(p), flags);
 }
 
 void rvh_cpu_overutilized_pixel_mod(void *data, int cpu, int *overutilized)
@@ -2783,6 +2764,7 @@ void rvh_enqueue_task_fair_pixel_mod(void *data, struct rq *rq, struct task_stru
 {
 	struct vendor_task_struct *vp = get_vendor_task_struct(p);
 	struct vendor_rq_struct *vrq = get_vendor_rq_struct(rq);
+	bool force_cpufreq_update = false;
 
 	if (vp->uclamp_fork_reset)
 		atomic_inc(&vrq->num_adpf_tasks);
@@ -2802,12 +2784,14 @@ void rvh_enqueue_task_fair_pixel_mod(void *data, struct rq *rq, struct task_stru
 	/* Can only process uclamp after sched_slice() was updated */
 	if (uclamp_is_used()) {
 		if (uclamp_can_ignore_uclamp_max(rq, p)) {
+			force_cpufreq_update = true;
 			uclamp_set_ignore_uclamp_max(p);
 			/* GKI has incremented it already, undo that */
 			uclamp_rq_dec_id(rq, p, UCLAMP_MAX);
 		}
 
 		if (uclamp_can_ignore_uclamp_min(rq, p)) {
+			force_cpufreq_update = true;
 			uclamp_set_ignore_uclamp_min(p);
 			/* GKI has incremented it already, undo that */
 			uclamp_rq_dec_id(rq, p, UCLAMP_MIN);
@@ -2815,14 +2799,11 @@ void rvh_enqueue_task_fair_pixel_mod(void *data, struct rq *rq, struct task_stru
 	}
 
 	/*
-	 * We strategically tell schedutil to ignore requests to update
-	 * frequencies when we call rvh_set_iowait_pixel_mod().
-	 *
-	 * Now we have applied the uclamp filter, we'll unconditionally request
-	 * a frequency update which should take all changes into account in one
-	 * go.
+	 * If we have applied the uclamp filter, request a frequency update
+	 * which should take the new changes into account.
 	 */
-	cpufreq_update_util(rq, SCHED_PIXEL_RESUME_UPDATES);
+	if (uclamp_is_used() && force_cpufreq_update)
+		cpufreq_update_util(rq, SCHED_PIXEL_FORCE_UPDATE);
 }
 
 void rvh_dequeue_task_fair_pixel_mod(void *data, struct rq *rq, struct task_struct *p, int flags)
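The ordering in enqueue is the crux: GKI may already have requested a
frequency via update_load_avg() using the unfiltered rq uclamp values, and
only afterwards does this hook get to drop ignorable hints. A condensed,
illustrative view of that path after this patch (step comments are mine,
not driver code):

/* 1) GKI enqueues p and may call cpufreq_update_util() with the
 *    *unfiltered* rq uclamp values via update_load_avg().
 *
 * 2) This vendor hook then filters the hints, e.g. for uclamp_max:
 */
if (uclamp_can_ignore_uclamp_max(rq, p)) {
	force_cpufreq_update = true;
	uclamp_set_ignore_uclamp_max(p);
	uclamp_rq_dec_id(rq, p, UCLAMP_MAX);	/* undo GKI's increment */
}

/* 3) Finally, a forced update re-evaluates the frequency with the
 *    filtered clamps, correcting whatever step 1 may have picked:
 */
if (uclamp_is_used() && force_cpufreq_update)
	cpufreq_update_util(rq, SCHED_PIXEL_FORCE_UPDATE);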
diff --git a/drivers/soc/google/vh/kernel/sched/sched_priv.h b/drivers/soc/google/vh/kernel/sched/sched_priv.h
index 00b486408b14..5bfb551f601d 100644
--- a/drivers/soc/google/vh/kernel/sched/sched_priv.h
+++ b/drivers/soc/google/vh/kernel/sched/sched_priv.h
@@ -169,8 +169,7 @@ DECLARE_STATIC_KEY_FALSE(uclamp_max_filter_enable);
 DECLARE_STATIC_KEY_FALSE(tapered_dvfs_headroom_enable);
 
-#define SCHED_PIXEL_BLOCK_UPDATES	BIT(8)
-#define SCHED_PIXEL_RESUME_UPDATES	BIT(9)
+#define SCHED_PIXEL_FORCE_UPDATE	BIT(8)
 
 /*****************************************************************************/
 /* Upstream Code Section */
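SCHED_PIXEL_FORCE_UPDATE travels in the same unsigned int flags argument of
cpufreq_update_util() as the upstream SCHED_CPUFREQ_* bits, so it must not
collide with them. A minimal sketch of the bit layout; the upstream value is
quoted from around v5.10 and should be re-checked when porting:

#define BIT(nr)				(1U << (nr))	/* as in <linux/bits.h> */

/* Upstream flag, include/linux/sched/cpufreq.h (~v5.10): */
#define SCHED_CPUFREQ_IOWAIT		BIT(0)

/* Vendor flag from this patch: BIT(8) reuses the slot freed by
 * SCHED_PIXEL_BLOCK_UPDATES while staying clear of the low upstream bits. */
#define SCHED_PIXEL_FORCE_UPDATE	BIT(8)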