about summary refs log tree commit diff
diff options
context:
space:
mode:
authorChris Redpath <chris.redpath@arm.com>2017-08-14 22:59:50 +0000
committerandroid-build-merger <android-build-merger@google.com>2017-08-14 22:59:50 +0000
commit0286f8e57b973fb6ab596471b15620f4afebbb45 (patch)
tree0c80c68f30c62a805e30b2d1bafb7085e21e7050
parent8272e4e91db81dc364ae97c56e1576adcb35277f (diff)
parent3f793b2c46a66d47c5c0bf988c5419eb42121906 (diff)
downloadv4.4-0286f8e57b973fb6ab596471b15620f4afebbb45.tar.gz
sched/fair: Add a backup_cpu to find_best_target am: 2e13f308a9
am: 3f793b2c46 Change-Id: I454d8809433385a88c5759d362347cb8624d0bc3
-rw-r--r--kernel/sched/fair.c26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d1445e7b298a..c193e9b1c38f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6238,7 +6238,8 @@ static int start_cpu(bool boosted)
return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
}
-static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
+static inline int find_best_target(struct task_struct *p, int *backup_cpu,
+ bool boosted, bool prefer_idle)
{
unsigned long best_idle_min_cap_orig = ULONG_MAX;
unsigned long min_util = boosted_task_util(p);
@@ -6255,6 +6256,8 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
int target_cpu = -1;
int cpu, i;
+ *backup_cpu = -1;
+
schedstat_inc(p, se.statistics.nr_wakeups_fbt_attempts);
schedstat_inc(this_rq(), eas_stats.fbt_attempts);
@@ -6489,6 +6492,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
target_cpu = prefer_idle
? best_active_cpu
: best_idle_cpu;
+ else
+ *backup_cpu = prefer_idle
+ ? best_active_cpu
+ : best_idle_cpu;
trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
best_idle_cpu, best_active_cpu,
@@ -6527,7 +6534,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
struct sched_domain *sd;
- int target_cpu = prev_cpu, tmp_target;
+ int target_cpu = prev_cpu, tmp_target, tmp_backup;
bool boosted, prefer_idle;
schedstat_inc(p, se.statistics.nr_wakeups_secb_attempts);
@@ -6556,7 +6563,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
/* Find a cpu with sufficient capacity */
- tmp_target = find_best_target(p, boosted, prefer_idle);
+ tmp_target = find_best_target(p, &tmp_backup, boosted, prefer_idle);
if (!sd)
goto unlock;
@@ -6585,10 +6592,15 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
}
if (energy_diff(&eenv) >= 0) {
- schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
- schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
- target_cpu = prev_cpu;
- goto unlock;
+ /* No energy saving for target_cpu, try backup */
+ target_cpu = tmp_backup;
+ eenv.dst_cpu = target_cpu;
+ if (tmp_backup < 0 || energy_diff(&eenv) >= 0) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
+ schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
+ target_cpu = prev_cpu;
+ goto unlock;
+ }
}
schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);