summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorRick Yiu <rickyiu@google.com>2024-01-23 06:22:47 +0000
committerWei Wang <wvw@google.com>2024-02-06 18:11:31 +0000
commit19fac2b7674f4473a20854f2e8cf00eb4bd8af59 (patch)
treee2259f4f4e4a2036207137265211aacf14992616
parent6a4f8afe8a6cba336fb29df9841be2c0085958b8 (diff)
downloadgs-19fac2b7674f4473a20854f2e8cf00eb4bd8af59.tar.gz
vh: sched: Do not restrict ADPF task by preferred idle mask
Always let ADPF task use all allowed cpus so that the preferred idle
mask will not restrict it.

Bug: 248621289
Change-Id: I0938ec296a5ae269ec4ed20d6e481f3d9fc443bb
Signed-off-by: Rick Yiu <rickyiu@google.com>
-rw-r--r--  drivers/soc/google/vh/kernel/sched/fair.c  13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/drivers/soc/google/vh/kernel/sched/fair.c b/drivers/soc/google/vh/kernel/sched/fair.c
index a7c971cc2..8bc8e4774 100644
--- a/drivers/soc/google/vh/kernel/sched/fair.c
+++ b/drivers/soc/google/vh/kernel/sched/fair.c
@@ -519,16 +519,13 @@ static inline unsigned int get_group_throttle(struct task_group *tg)
/*
* If a task is in prefer_idle group, check if it could run on the cpu based on its prio and the
- * prefer_idle cpumask defined, but bail out for bulk wake (wake_q_count > 1).
+ * prefer_idle cpumask defined.
*/
-static inline bool is_preferred_idle_cpu(struct task_struct *p, int cpu)
+static inline bool check_preferred_idle_mask(struct task_struct *p, int cpu)
{
int vendor_group = get_vendor_group(p);
- if (!vg[vendor_group].prefer_idle)
- return true;
-
- if (p->wake_q_count > 1)
+ if (!get_prefer_idle(p))
return true;
if (p->prio <= THREAD_PRIORITY_TOP_APP_BOOST) {
@@ -544,7 +541,7 @@ static inline const cpumask_t *get_preferred_idle_mask(struct task_struct *p)
{
int vendor_group = get_vendor_group(p);
- if (p->wake_q_count > 1)
+ if (p->wake_q_count || get_uclamp_fork_reset(p, false))
return cpu_possible_mask;
if (p->prio <= THREAD_PRIORITY_TOP_APP_BOOST) {
@@ -2415,7 +2412,7 @@ void rvh_select_task_rq_fair_pixel_mod(void *data, struct task_struct *p, int pr
/* prefer prev cpu */
if (cpu_active(prev_cpu) && cpu_is_idle(prev_cpu) &&
- task_fits_capacity(p, prev_cpu) && is_preferred_idle_cpu(p, prev_cpu)) {
+ task_fits_capacity(p, prev_cpu) && check_preferred_idle_mask(p, prev_cpu)) {
struct cpuidle_state *idle_state;
unsigned int exit_lat = UINT_MAX;