author     Hanumath Prasad <hpprasad@codeaurora.org>  2015-02-24 11:01:39 +0530
committer  Hanumath Prasad <hpprasad@codeaurora.org>  2015-02-24 11:13:08 +0530
commit     4e10c691f8886ee666c87eb10676ad773cc54008 (patch)
tree       39ef2b17c34fda438eae09a781f65ca5bb28af34 /kernel
parent     fa0472cee53f2a77eb7725946b236df161c56405 (diff)
download   qcom-msm-v3.10-4e10c691f8886ee666c87eb10676ad773cc54008.tar.gz
Revert "sched: per-cpu mostly_idle threshold"
This reverts commit 31be4a94157308c1ec113c85ead01696d64cbd9a.

Change-Id: I462cbdcea79f77782e34a3d8ad20ece3ebe9aefc
Signed-off-by: Hanumath Prasad <hpprasad@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c   10
-rw-r--r--  kernel/sched/debug.c   3
-rw-r--r--  kernel/sched/fair.c   84
-rw-r--r--  kernel/sched/sched.h   8
-rw-r--r--  kernel/sysctl.c       14

5 files changed, 50 insertions, 69 deletions
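
Condensed from the fair.c hunks below, the net effect of the revert on the mostly-idle test is the following contrast (a summary sketch, not part of the commit; identifiers as they appear in the patch):

	/* Before this revert: thresholds were per-runqueue fields. */
	mostly_idle = cpu_load(cpu) <= rq->mostly_idle_load &&
		      rq->nr_running <= rq->mostly_idle_nr_run;

	/* After this revert: a single global pair of tunables again. */
	mostly_idle = cpu_load(cpu) <= sched_mostly_idle_load &&
		      rq->nr_running <= sysctl_sched_mostly_idle_nr_run;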
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index be07c9ed39d..009cb07320b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2026,7 +2026,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
u64 start_ts = sched_clock();
int reason = WINDOW_CHANGE;
unsigned int old = 0, new = 0;
- unsigned int old_window_size = sched_ravg_window;
disable_window_stats();
@@ -2049,13 +2048,8 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
for_each_possible_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
- if (window_start) {
- u32 mostly_idle_load = rq->mostly_idle_load;
-
+ if (window_start)
rq->window_start = window_start;
- rq->mostly_idle_load = div64_u64((u64)mostly_idle_load *
- (u64)sched_ravg_window, (u64)old_window_size);
- }
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
#endif
@@ -8996,8 +8990,6 @@ void __init sched_init(void)
rq->window_start = 0;
rq->nr_small_tasks = rq->nr_big_tasks = 0;
rq->hmp_flags = 0;
- rq->mostly_idle_load = pct_to_real(20);
- rq->mostly_idle_nr_run = 3;
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->old_busy_time = 0;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
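
Note that the hunk above also drops the rescaling of mostly_idle_load across a window-size change: with the threshold global again, it is presumably recomputed from its _pct sysctl by set_hmp_defaults() (see the fair.c hunks below) rather than scaled per runqueue.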
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 43fd8983e6f..26f93f77bcd 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -306,8 +306,6 @@ do { \
P(cpu_power);
#endif
#ifdef CONFIG_SCHED_HMP
- P(mostly_idle_load);
- P(mostly_idle_nr_run);
P(load_scale_factor);
P(capacity);
P(max_possible_capacity);
@@ -400,6 +398,7 @@ static void sched_debug_header(struct seq_file *m)
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#ifdef CONFIG_SCHED_HMP
+ P(sched_mostly_idle_load);
P(sched_small_task);
P(sched_upmigrate);
P(sched_downmigrate);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 042368a87c4..abb602575be 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1256,6 +1256,14 @@ unsigned int __read_mostly sched_enable_hmp = 0;
unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
/*
+ * A cpu is considered practically idle, if:
+ *
+ * rq->nr_running <= sysctl_sched_mostly_idle_nr_run &&
+ * rq->cumulative_runnable_avg <= sched_mostly_idle_load
+ */
+unsigned int __read_mostly sysctl_sched_mostly_idle_nr_run = 3;
+
+/*
* Control whether or not individual CPU power consumption is used to
* guide task placement.
*/
@@ -1269,6 +1277,16 @@ unsigned int __read_mostly sched_enable_power_aware = 0;
unsigned int __read_mostly sysctl_sched_powerband_limit_pct = 20;
/*
+ * Conversion of *_pct to absolute form is based on max_task_load().
+ *
+ * For example:
+ * sched_mostly_idle_load =
+ * (sysctl_sched_mostly_idle_load_pct * max_task_load()) / 100;
+ */
+unsigned int __read_mostly sched_mostly_idle_load;
+unsigned int __read_mostly sysctl_sched_mostly_idle_load_pct = 20;
+
+/*
* CPUs with load greater than the sched_spill_load_threshold are not
* eligible for task placement. When all CPUs in a cluster achieve a
* load higher than this level, tasks becomes eligible for inter
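
To make the percentage-to-absolute conversion concrete, here is a worked example with a hypothetical max_task_load() value (the real value depends on the window size and load scaling, so the number below is illustrative only):

	/* Hypothetical: suppose max_task_load() returns 1000000. The default
	 * sysctl_sched_mostly_idle_load_pct of 20 then gives:
	 *
	 *   sched_mostly_idle_load = pct_to_real(20)
	 *                          = div64_u64((u64)20 * 1000000, 100)
	 *                          = 200000
	 */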
@@ -1341,11 +1359,17 @@ static inline int available_cpu_capacity(int cpu)
return rq->capacity;
}
+#define pct_to_real(tunable) \
+ (div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
void set_hmp_defaults(void)
{
sched_spill_load =
pct_to_real(sysctl_sched_spill_load_pct);
+ sched_mostly_idle_load =
+ pct_to_real(sysctl_sched_mostly_idle_load_pct);
+
sched_small_task =
pct_to_real(sysctl_sched_small_task_pct);
@@ -1369,44 +1393,6 @@ void set_hmp_defaults(void)
(u64)sched_ravg_window, 100);
}
-int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct)
-{
- struct rq *rq = cpu_rq(cpu);
-
- if (mostly_idle_pct < 0 || mostly_idle_pct > 100)
- return -EINVAL;
-
- rq->mostly_idle_load = pct_to_real(mostly_idle_pct);
-
- return 0;
-}
-
-int sched_get_cpu_mostly_idle_load(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- int mostly_idle_pct;
-
- mostly_idle_pct = real_to_pct(rq->mostly_idle_load);
-
- return mostly_idle_pct;
-}
-
-int sched_set_cpu_mostly_idle_nr_run(int cpu, int nr_run)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->mostly_idle_nr_run = nr_run;
-
- return 0;
-}
-
-int sched_get_cpu_mostly_idle_nr_run(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- return rq->mostly_idle_nr_run;
-}
-
/*
* 'load' is in reference to "best cpu" at its best frequency.
* Scale that in reference to a given cpu, accounting for how bad it is
@@ -1488,12 +1474,9 @@ spill_threshold_crossed(struct task_struct *p, struct rq *rq, int cpu,
int mostly_idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- int mostly_idle;
-
- mostly_idle = (cpu_load(cpu) <= rq->mostly_idle_load
- && rq->nr_running <= rq->mostly_idle_nr_run);
- return mostly_idle;
+ return (cpu_load(cpu) <= sched_mostly_idle_load
+ && rq->nr_running <= sysctl_sched_mostly_idle_nr_run);
}
static int mostly_idle_cpu_sync(int cpu, int sync)
@@ -1513,8 +1496,8 @@ static int mostly_idle_cpu_sync(int cpu, int sync)
load -= rq->curr->ravg.demand;
}
- return (load <= rq->mostly_idle_load
- && nr_running <= rq->mostly_idle_nr_run);
+ return (load <= sched_mostly_idle_load
+ && nr_running <= sysctl_sched_mostly_idle_nr_run);
}
static int boost_refcount;
@@ -2135,9 +2118,10 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
return ret;
if ((sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct) ||
- *data > 100) {
- *data = old_val;
- return -EINVAL;
+ (sysctl_sched_mostly_idle_load_pct >
+ sysctl_sched_spill_load_pct) || *data > 100) {
+ *data = old_val;
+ return -EINVAL;
}
/*
@@ -7272,8 +7256,8 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
int i;
if (rq->nr_running >= 2 && (rq->nr_running - rq->nr_small_tasks >= 2 ||
- rq->nr_running > rq->mostly_idle_nr_run ||
- cpu_load(cpu) > rq->mostly_idle_load)) {
+ rq->nr_running > sysctl_sched_mostly_idle_nr_run ||
+ cpu_load(cpu) > sched_mostly_idle_load)) {
if (rq->capacity == max_capacity)
return 1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 80effb0ebb7..7d7eb698126 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -497,8 +497,6 @@ struct rq {
int capacity;
int max_possible_capacity;
u64 window_start;
- u32 mostly_idle_load;
- int mostly_idle_nr_run;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int old_busy_time;
@@ -751,12 +749,6 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}
-#define pct_to_real(tunable) \
- (div64_u64((u64)tunable * (u64)max_task_load(), 100))
-
-#define real_to_pct(tunable) \
- (div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
-
#else /* CONFIG_SCHED_HMP */
static inline int pct_task_load(struct task_struct *p) { return 0; }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f8c670ccb80..2652c71d122 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -368,6 +368,20 @@ static struct ctl_table kern_table[] = {
.proc_handler = sched_hmp_proc_update_handler,
},
{
+ .procname = "sched_mostly_idle_load",
+ .data = &sysctl_sched_mostly_idle_load_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_mostly_idle_nr_run",
+ .data = &sysctl_sched_mostly_idle_nr_run,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "sched_spill_load",
.data = &sysctl_sched_spill_load_pct,
.maxlen = sizeof(unsigned int),
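
Since these entries sit in kern_table, they surface as /proc/sys/kernel/sched_mostly_idle_load and /proc/sys/kernel/sched_mostly_idle_nr_run. A minimal userspace sketch for tuning them follows (write_sysctl is a hypothetical helper, not part of the patch; per the sched_hmp_proc_update_handler hunk above, a load percentage above sched_spill_load is rejected with EINVAL):

	#include <stdio.h>

	/* Hypothetical helper: write one scheduler tunable under /proc/sys/kernel. */
	static int write_sysctl(const char *name, unsigned int val)
	{
		char path[128];
		FILE *f;
		int ret = 0;

		snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		if (fprintf(f, "%u\n", val) < 0)
			ret = -1;
		if (fclose(f))		/* a rejected value (EINVAL) surfaces here */
			ret = -1;
		return ret;
	}

	int main(void)
	{
		/* The defaults set by the patch: 20 pct load, 3 runnable tasks. */
		write_sysctl("sched_mostly_idle_load", 20);
		write_sysctl("sched_mostly_idle_nr_run", 3);
		return 0;
	}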