author		Paul Turner <pjt@google.com>	2012-06-28 03:24:14 +0100
committer	Jon Medhurst <tixy@linaro.org>	2012-07-16 15:01:35 +0100
commit		8763254581f462f3f3a26501907581d6a737d841 (patch)
tree		38ff0df8c94b29d5b12814889b83435d4a16050f
parent		d2d2bf3317826473ad069267dde95751ba7c909b (diff)
download	vexpress-a9-8763254581f462f3f3a26501907581d6a737d841.tar.gz
sched: aggregate total task_group load
Maintain a global running sum of the average load seen on each cfs_rq
belonging to each task group so that it may be used in calculating an
appropriate shares:weight distribution.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Ben Segall <bsegall@google.com>
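In rough userspace C, the scheme reads as follows. This is a hedged sketch of the aggregation idea only, not the kernel code: the names task_group_sketch, cfs_rq_sketch and update_tg_load_contrib are invented for illustration, and the 1/8 test mirrors the one in __update_cfs_rq_tg_load_contrib in the patch below, which skips small deltas so the shared atomic counter is not touched on every update.

/* Hypothetical userspace sketch of the aggregation idea; not kernel code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct task_group_sketch {
	_Atomic int64_t load_avg;	/* global running sum across all cfs_rqs */
};

struct cfs_rq_sketch {
	struct task_group_sketch *tg;
	int64_t runnable_load_avg;
	int64_t blocked_load_avg;
	int64_t tg_load_contrib;	/* amount last folded into tg->load_avg */
};

static void update_tg_load_contrib(struct cfs_rq_sketch *cfs_rq, int force_update)
{
	int64_t contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
	int64_t delta = contrib - cfs_rq->tg_load_contrib;

	/*
	 * Publish only when forced, or when the delta exceeds 1/8 of what
	 * we last published, to keep contention on tg->load_avg low.
	 */
	if (force_update || llabs(delta) > cfs_rq->tg_load_contrib / 8) {
		atomic_fetch_add(&cfs_rq->tg->load_avg, delta);
		cfs_rq->tg_load_contrib += delta;
	}
}

int main(void)
{
	struct task_group_sketch tg = { .load_avg = 0 };
	struct cfs_rq_sketch rq0 = { .tg = &tg, .runnable_load_avg = 1024 };

	update_tg_load_contrib(&rq0, 1);	/* forced: publishes 1024 */
	rq0.runnable_load_avg = 1100;
	update_tg_load_contrib(&rq0, 0);	/* delta 76 <= 1024/8: skipped */
	rq0.runnable_load_avg = 1400;
	update_tg_load_contrib(&rq0, 0);	/* delta 376 > 128: folded in */

	printf("tg load_avg = %lld\n", (long long)atomic_load(&tg.load_avg));
	return 0;
}

Batching deltas this way trades a little staleness in the global sum for far fewer cross-cpu atomic operations.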
-rw-r--r--	kernel/sched/debug.c	4
-rw-r--r--	kernel/sched/fair.c	22
-rw-r--r--	kernel/sched/sched.h	4
3 files changed, 30 insertions, 0 deletions
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2d2e2b3c1be..290892361a0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -230,6 +230,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
+			atomic64_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
+			cfs_rq->tg_load_contrib);
 #endif
 
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 67f435715ff..8ef44d2e381 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1096,6 +1096,26 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
 	return decays;
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+						 int force_update)
+{
+	struct task_group *tg = cfs_rq->tg;
+	s64 tg_contrib;
+
+	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
+	tg_contrib -= cfs_rq->tg_load_contrib;
+
+	if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
+		atomic64_add(tg_contrib, &tg->load_avg);
+		cfs_rq->tg_load_contrib += tg_contrib;
+	}
+}
+#else
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+						 int force_update) {}
+#endif
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
@@ -1166,6 +1186,8 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 		atomic64_add(decays, &cfs_rq->decay_counter);
 		cfs_rq->last_decay = now;
 	}
+
+	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 28a76f6bb55..de8a0d7ee62 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -112,6 +112,7 @@ struct task_group {
 	unsigned long shares;
 
 	atomic_t load_weight;
+	atomic64_t load_avg;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -232,6 +233,9 @@ struct cfs_rq {
 	u64 runnable_load_avg, blocked_load_avg;
 	atomic64_t decay_counter, removed_load;
 	u64 last_decay;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	u64 tg_load_contrib;
+#endif
 #endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
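As context for the commit message's shares:weight goal, the global sum lets each cpu take a proportional slice of the group's shares. This patch only maintains the sum; the consumer below is an assumption about how later patches in the series might use it, with calc_cpu_shares invented here for illustration.

/* Hypothetical consumer of tg->load_avg; not part of this patch. */
#include <stdint.h>

static inline int64_t calc_cpu_shares(int64_t tg_shares,	/* group's total shares */
				      int64_t cfs_rq_contrib,	/* this cpu's tg_load_contrib */
				      int64_t tg_load_avg)	/* global sum over all cpus */
{
	if (tg_load_avg <= 0)
		return tg_shares;	/* no tracked load anywhere: keep full weight */
	/* This cpu's slice = total shares * (its contribution / group total). */
	return tg_shares * cfs_rq_contrib / tg_load_avg;
}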