author     Srivatsa Vaddagiri <vatsa@codeaurora.org>   2014-11-21 18:25:11 +0530
committer  Tirupathi Reddy <tirupath@codeaurora.org>   2015-02-11 22:29:47 +0530
commit     b60ba6321c72ee3c6b7a41e6b0ef32eaaca40c32 (patch)
tree       4917392e2e94d4039e1a2fe71f46046616be3781 /kernel
parent     31be4a94157308c1ec113c85ead01696d64cbd9a (diff)
download   qcom-msm-v3.10-b60ba6321c72ee3c6b7a41e6b0ef32eaaca40c32.tar.gz
sched: Packing support until a frequency threshold
Add another dimension for task packing, based on frequency. This patch adds a
per-cpu tunable, rq->mostly_idle_freq, which when set will result in tasks
being packed on a single cpu within a cluster as long as the cluster frequency
stays below the configured threshold.

Change-Id: I8c65376801efd158c8145073a10a1a555004f1da
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
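The core of the gate appears in select_packing_target() and _nohz_kick_needed_hmp()
in the diff below: pack only while the cluster runs below the threshold, and do not
treat a thermally capped max frequency as "mostly idle". Below is a minimal,
self-contained user-space sketch of that decision for illustration only; struct
cpu_state and should_pack() are hypothetical stand-ins for the rq fields
(cur_freq, max_freq, mostly_idle_freq) touched by this patch, not kernel code.

/*
 * User-space sketch of the frequency gate added by this patch.
 * The real decision is made inside select_packing_target() and
 * _nohz_kick_needed_hmp() in kernel/sched/fair.c.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	unsigned int cur_freq;          /* current cluster frequency */
	unsigned int max_freq;          /* current max, may be thermally capped */
	unsigned int mostly_idle_freq;  /* packing threshold, 0 = disabled */
};

/* Pack onto a single cpu only while the cluster runs below the threshold. */
static bool should_pack(const struct cpu_state *cs)
{
	if (!cs->mostly_idle_freq)
		return false;            /* feature disabled */
	if (cs->cur_freq >= cs->mostly_idle_freq)
		return false;            /* cluster already busy enough */
	if (cs->max_freq <= cs->mostly_idle_freq)
		return false;            /* low freq is due to throttling, don't pack */
	return true;
}

int main(void)
{
	struct cpu_state cs = {
		.cur_freq = 300000,
		.max_freq = 1500000,
		.mostly_idle_freq = 600000,
	};

	printf("pack tasks on one cpu: %s\n", should_pack(&cs) ? "yes" : "no");
	return 0;
}

When should_pack() returns true, the patch further narrows the candidate cpus to the
task's allowed, online cpus within the same frequency domain and picks the one with
the lowest power cost, as select_packing_target() shows below.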
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  |  1
-rw-r--r--  kernel/sched/fair.c  | 62
-rw-r--r--  kernel/sched/sched.h |  1
3 files changed, 64 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d79b2c82ae3..8c4db5ca0eb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8992,6 +8992,7 @@ void __init sched_init(void)
rq->hmp_flags = 0;
rq->mostly_idle_load = pct_to_real(20);
rq->mostly_idle_nr_run = 3;
+ rq->mostly_idle_freq = 0;
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->old_busy_time = 0;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 042368a87c4..6e64f7ec1ff 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1381,6 +1381,25 @@ int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct)
return 0;
}
+int sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (mostly_idle_freq > rq->max_possible_freq)
+ return -EINVAL;
+
+ rq->mostly_idle_freq = mostly_idle_freq;
+
+ return 0;
+}
+
+unsigned int sched_get_cpu_mostly_idle_freq(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return rq->mostly_idle_freq;
+}
+
int sched_get_cpu_mostly_idle_load(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -1845,6 +1864,42 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
return skip;
}
+/*
+ * Select a single cpu in cluster as target for packing, iff cluster frequency
+ * is less than a threshold level
+ */
+static int select_packing_target(struct task_struct *p, int best_cpu)
+{
+ struct rq *rq = cpu_rq(best_cpu);
+ struct cpumask search_cpus;
+ int i;
+ int min_cost = INT_MAX;
+ int target = best_cpu;
+
+ if (rq->cur_freq >= rq->mostly_idle_freq)
+ return best_cpu;
+
+ /* Don't pack if current freq is low because of throttling */
+ if (rq->max_freq <= rq->mostly_idle_freq)
+ return best_cpu;
+
+ cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+ cpumask_and(&search_cpus, &search_cpus, &rq->freq_domain_cpumask);
+
+ /* Pick the first lowest power cpu as target */
+ for_each_cpu(i, &search_cpus) {
+ int cost = power_cost(p, i);
+
+ if (cost < min_cost) {
+ target = i;
+ min_cost = cost;
+ }
+ }
+
+ return target;
+}
+
+
/* return cheapest cpu that can fit this task */
static int select_best_cpu(struct task_struct *p, int target, int reason,
int sync)
@@ -1994,6 +2049,9 @@ done:
best_cpu = fallback_idle_cpu;
}
+ if (cpu_rq(best_cpu)->mostly_idle_freq)
+ best_cpu = select_packing_target(p, best_cpu);
+
return best_cpu;
}
@@ -7271,6 +7329,10 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
struct sched_domain *sd;
int i;
+ if (rq->mostly_idle_freq && rq->cur_freq < rq->mostly_idle_freq
+ && rq->max_freq > rq->mostly_idle_freq)
+ return 0;
+
if (rq->nr_running >= 2 && (rq->nr_running - rq->nr_small_tasks >= 2 ||
rq->nr_running > rq->mostly_idle_nr_run ||
cpu_load(cpu) > rq->mostly_idle_load)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 80effb0ebb7..04f4edb764a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -499,6 +499,7 @@ struct rq {
u64 window_start;
u32 mostly_idle_load;
int mostly_idle_nr_run;
+ int mostly_idle_freq;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int old_busy_time;