 drivers/base/cpu.c    | 41 +++++++++++++++++++++++++++++++++++++++++
 include/linux/sched.h |  3 +++
 kernel/sched/core.c   |  1 +
 kernel/sched/fair.c   | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h  |  1 +
 5 files changed, 108 insertions(+), 0 deletions(-)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index b0de9573fbf..eb0b4e84c62 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -205,6 +205,42 @@ static ssize_t __ref store_sched_mostly_idle_load(struct device *dev,
return err;
}
+static ssize_t show_sched_mostly_idle_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpunum;
+ unsigned int mostly_idle_freq;
+
+ cpunum = cpu->dev.id;
+
+ mostly_idle_freq = sched_get_cpu_mostly_idle_freq(cpunum);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%u\n", mostly_idle_freq);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_mostly_idle_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = cpu->dev.id, err;
+ unsigned int mostly_idle_freq;
+
+ err = kstrtouint(strstrip((char *)buf), 0, &mostly_idle_freq);
+ if (err)
+ return err;
+
+ err = sched_set_cpu_mostly_idle_freq(cpuid, mostly_idle_freq);
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
static ssize_t show_sched_mostly_idle_nr_run(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -241,6 +277,8 @@ static ssize_t __ref store_sched_mostly_idle_nr_run(struct device *dev,
return err;
}
+static DEVICE_ATTR(sched_mostly_idle_freq, 0664, show_sched_mostly_idle_freq,
+ store_sched_mostly_idle_freq);
static DEVICE_ATTR(sched_mostly_idle_load, 0664, show_sched_mostly_idle_load,
store_sched_mostly_idle_load);
static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664,
@@ -424,6 +462,9 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
if (!error)
error = device_create_file(&cpu->dev,
&dev_attr_sched_mostly_idle_nr_run);
+ if (!error)
+ error = device_create_file(&cpu->dev,
+ &dev_attr_sched_mostly_idle_freq);
#endif
return error;
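
A minimal userspace sketch of exercising the new attribute, not part of the patch. It assumes the file is exposed at /sys/devices/system/cpu/cpu<N>/sched_mostly_idle_freq (the usual location for per-cpu device attributes created in register_cpu()), that values use the same units as rq->cur_freq (typically kHz), and that the caller has write permission per the 0664 mode above; the 729600 threshold is a made-up example.

/* Sketch, not part of the patch: set and read back cpu0's
 * sched_mostly_idle_freq from userspace. Path and units are
 * assumptions, see the note above. */
#include <stdio.h>

#define ATTR "/sys/devices/system/cpu/cpu0/sched_mostly_idle_freq"

int main(void)
{
	unsigned int freq;
	FILE *f;

	f = fopen(ATTR, "w");		/* needs root or group write (0664) */
	if (!f) {
		perror(ATTR);
		return 1;
	}
	fprintf(f, "%u\n", 729600U);	/* hypothetical threshold, kHz */
	fclose(f);

	f = fopen(ATTR, "r");
	if (f && fscanf(f, "%u", &freq) == 1)
		printf("mostly_idle_freq = %u\n", freq);
	if (f)
		fclose(f);
	return 0;
}
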
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 51058b214f8..ba6e25d54b6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1914,6 +1914,9 @@ extern int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct);
extern int sched_get_cpu_mostly_idle_load(int cpu);
extern int sched_set_cpu_mostly_idle_nr_run(int cpu, int nr_run);
extern int sched_get_cpu_mostly_idle_nr_run(int cpu);
+extern int
+sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq);
+extern unsigned int sched_get_cpu_mostly_idle_freq(int cpu);
#else
static inline int sched_set_boost(int enable)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d79b2c82ae3..8c4db5ca0eb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8992,6 +8992,7 @@ void __init sched_init(void)
rq->hmp_flags = 0;
rq->mostly_idle_load = pct_to_real(20);
rq->mostly_idle_nr_run = 3;
+ rq->mostly_idle_freq = 0;
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->old_busy_time = 0;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 042368a87c4..6e64f7ec1ff 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1381,6 +1381,25 @@ int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct)
return 0;
}
+int sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (mostly_idle_freq > rq->max_possible_freq)
+ return -EINVAL;
+
+ rq->mostly_idle_freq = mostly_idle_freq;
+
+ return 0;
+}
+
+unsigned int sched_get_cpu_mostly_idle_freq(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return rq->mostly_idle_freq;
+}
+
int sched_get_cpu_mostly_idle_load(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -1845,6 +1864,41 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
return skip;
}
+/*
+ * Select a single cpu in the cluster as the target for packing, if the
+ * cluster frequency is below the mostly_idle_freq threshold (and is not
+ * merely clamped there by throttling).
+ */
+static int select_packing_target(struct task_struct *p, int best_cpu)
+{
+ struct rq *rq = cpu_rq(best_cpu);
+ struct cpumask search_cpus;
+ int i;
+ int min_cost = INT_MAX;
+ int target = best_cpu;
+
+ if (rq->cur_freq >= rq->mostly_idle_freq)
+ return best_cpu;
+
+ /* Don't pack if current freq is low because of throttling */
+ if (rq->max_freq <= rq->mostly_idle_freq)
+ return best_cpu;
+
+ cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+ cpumask_and(&search_cpus, &search_cpus, &rq->freq_domain_cpumask);
+
+ /* Pick the lowest-power cpu as the target; the first found wins ties */
+ for_each_cpu(i, &search_cpus) {
+ int cost = power_cost(p, i);
+
+ if (cost < min_cost) {
+ target = i;
+ min_cost = cost;
+ }
+ }
+
+ return target;
+}
+
/* return cheapest cpu that can fit this task */
static int select_best_cpu(struct task_struct *p, int target, int reason,
int sync)
@@ -1994,6 +2049,9 @@ done:
best_cpu = fallback_idle_cpu;
}
+ if (cpu_rq(best_cpu)->mostly_idle_freq)
+ best_cpu = select_packing_target(p, best_cpu);
+
return best_cpu;
}
@@ -7271,6 +7329,10 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
struct sched_domain *sd;
int i;
+ if (rq->mostly_idle_freq && rq->cur_freq < rq->mostly_idle_freq
+ && rq->max_freq > rq->mostly_idle_freq)
+ return 0;
+
if (rq->nr_running >= 2 && (rq->nr_running - rq->nr_small_tasks >= 2 ||
rq->nr_running > rq->mostly_idle_nr_run ||
cpu_load(cpu) > rq->mostly_idle_load)) {
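
Distilled from the fair.c hunks above, a hedged sketch of the gate that both select_packing_target() and the _nohz_kick_needed_hmp() check apply; packing_active() is a made-up helper name, not something the patch adds.

/* Sketch, not part of the patch: the conditions under which the
 * packing path is active, condensed from select_packing_target()
 * and _nohz_kick_needed_hmp() above. */
static inline int packing_active(struct rq *rq)
{
	/* 0, the default set in sched_init(), disables packing entirely */
	if (!rq->mostly_idle_freq)
		return 0;
	/* cluster already at or above the threshold: spread as usual */
	if (rq->cur_freq >= rq->mostly_idle_freq)
		return 0;
	/* max_freq clamped at or below the threshold means the low
	 * current frequency comes from throttling, not idleness */
	if (rq->max_freq <= rq->mostly_idle_freq)
		return 0;
	return 1;
}
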
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 80effb0ebb7..04f4edb764a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -499,6 +499,7 @@ struct rq {
u64 window_start;
u32 mostly_idle_load;
int mostly_idle_nr_run;
+ unsigned int mostly_idle_freq;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int old_busy_time;