summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorQuentin Perret <quentin.perret@arm.com>2019-02-27 11:21:24 +0000
committerQuentin Perret <qperret@google.com>2019-12-02 10:03:57 +0000
commiteed91ba4a035479ee4119e65d70b7e625501fa33 (patch)
tree2b58b881256dcfe82069c71bdadd8af72c0d433c
parent7565bcb84a77a0ab6f38ffb125d52ee42ae8b9f1 (diff)
downloadcommon-experimental/eas-dev.tar.gz
ANDROID: sched/fair: Bias EAS placement for latencyexperimental/eas-dev
Add to find_energy_efficient_cpu() a latency sensitive case which mimics what was done for prefer-idle in android-4.19 and before (see [1] for reference). This isn't strictly equivalent to the legacy algorithm but comes real close, and isn't very invasive. Overall, the idea is to select the biggest idle CPU we can find for latency-sensitive boosted tasks, and the smallest CPU where they can fit for latency-sensitive non-boosted tasks. The main differences with the legacy behaviour are the following: 1. the policy for 'prefer idle' when there isn't a single idle CPU in the system is simpler now. We just pick the CPU with the highest spare capacity; 2. the cstate awareness is implemented by minimizing the exit latency rather than the idle state index. This is how it is done in the slow path (find_idlest_group_cpu()), it doesn't require us to keep hooks into CPUIdle, and should actually be better because what we want is a CPU that can wake up quickly; 3. non-latency-sensitive tasks just use the standard mainline energy-aware wake-up path, which decides the placement using the Energy Model; 4. the 'boosted' and 'latency_sensitive' attributes of a task come from util_clamp (which now replaces schedtune). [1] https://android.googlesource.com/kernel/common.git/+/c27c56105dcaaae54ecc39ef33fbfac87a1486fc Change-Id: Ia58516906e9cb5abe08385a8cd088097043d8703 Signed-off-by: Quentin Perret <quentin.perret@arm.com>
-rw-r--r--kernel/sched/fair.c40
1 files changed, 38 insertions, 2 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ec7565ad589e..f6b6f4b251be 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6348,8 +6348,13 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
{
unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
+ unsigned long max_spare_cap_ls = 0, target_cap;
unsigned long cpu_cap, util, base_energy = 0;
+ bool boosted, latency_sensitive = false;
+ unsigned int min_exit_lat = UINT_MAX;
int cpu, best_energy_cpu = prev_cpu;
+ struct cpuidle_state *idle;
struct sched_domain *sd;
struct perf_domain *pd;
@@ -6378,6 +6383,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
if (!task_util_est(p))
goto unlock;
+ latency_sensitive = uclamp_latency_sensitive(p);
+ boosted = uclamp_boosted(p);
+ target_cap = boosted ? 0 : ULONG_MAX;
+
for (; pd; pd = pd->next) {
unsigned long cur_delta, spare_cap, max_spare_cap = 0;
unsigned long base_energy_pd;
@@ -6398,7 +6407,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
continue;
/* Always use prev_cpu as a candidate. */
- if (cpu == prev_cpu) {
+ if (!latency_sensitive && cpu == prev_cpu) {
prev_delta = compute_energy(p, prev_cpu, pd);
prev_delta -= base_energy_pd;
best_delta = min(best_delta, prev_delta);
@@ -6413,10 +6422,34 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
max_spare_cap = spare_cap;
max_spare_cap_cpu = cpu;
}
+
+ if (!latency_sensitive)
+ continue;
+
+ if (idle_cpu(cpu)) {
+ cpu_cap = capacity_orig_of(cpu);
+ if (boosted && cpu_cap < target_cap)
+ continue;
+ if (!boosted && cpu_cap > target_cap)
+ continue;
+ idle = idle_get_state(cpu_rq(cpu));
+ if (idle && idle->exit_latency > min_exit_lat &&
+ cpu_cap == target_cap)
+ continue;
+
+ if (idle)
+ min_exit_lat = idle->exit_latency;
+ target_cap = cpu_cap;
+ best_idle_cpu = cpu;
+ } else if (spare_cap > max_spare_cap_ls) {
+ max_spare_cap_ls = spare_cap;
+ max_spare_cap_cpu_ls = cpu;
+ }
}
/* Evaluate the energy impact of using this CPU. */
- if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
+ if (!latency_sensitive && max_spare_cap_cpu >= 0 &&
+ max_spare_cap_cpu != prev_cpu) {
cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
cur_delta -= base_energy_pd;
if (cur_delta < best_delta) {
@@ -6428,6 +6461,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
unlock:
rcu_read_unlock();
+ if (latency_sensitive)
+ return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;
+
/*
* Pick the best CPU if prev_cpu cannot be used, or if it saves at
* least 6% of the energy used by prev_cpu.