author     Dima Zavin <dima@android.com>        2010-09-28 17:35:31 -0700
committer  Erik Gilling <konkers@android.com>   2011-03-30 11:24:25 -0700
commit     e9e779dceaf3f00763b8ca8f0e61d98830ee87d8 (patch)
tree       76b5db856b533726d68899ad3b226b9748b53f85
parent     46918c60879c22437f746f358139d930e072e2fe (diff)
download   experimental-e9e779dceaf3f00763b8ca8f0e61d98830ee87d8.tar.gz
sched: use the old min_vruntime when normalizing on dequeue
After pulling the thread off the run-queue during a cgroup change, the
cfs_rq.min_vruntime gets recalculated. The dequeued thread's vruntime then
gets normalized to this new value. This can then lead to the thread getting
an unfair boost in the new group if the vruntime of the next task in the
old run-queue was way further ahead.

Cc: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Dima Zavin <dima@android.com>
-rw-r--r--	kernel/sched_fair.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
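The arithmetic behind the fix can be illustrated with a small stand-alone
sketch. This is toy user-space code with hypothetical values, not kernel
code: it only shows why normalizing against the recomputed min_vruntime
over-credits the migrating task, while normalizing against a snapshot taken
before update_min_vruntime() keeps the offset neutral.

/* toy_min_vruntime.c - illustrative only, values are made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t se_vruntime = 1000;	/* migrating task, was at the head      */
	uint64_t min_before  = 1000;	/* cfs_rq->min_vruntime before dequeue  */
	uint64_t min_after   = 5000;	/* recomputed: next task is far ahead   */
	uint64_t new_rq_min  = 9000;	/* destination run-queue's min_vruntime */

	/* Old behaviour: normalize against the recomputed value, then re-add
	 * the destination's min_vruntime on enqueue.  The u64 subtraction
	 * wraps, so the task re-enters 4000 units behind everyone on the new
	 * run-queue -> unfair boost. */
	uint64_t old = se_vruntime - min_after + new_rq_min;	/* 5000 */

	/* Patched behaviour: normalize against the value captured before
	 * update_min_vruntime(), so the task re-enters level with the
	 * destination's min_vruntime. */
	uint64_t fixed = se_vruntime - min_before + new_rq_min;	/* 9000 */

	printf("old: %llu  fixed: %llu\n",
	       (unsigned long long)old, (unsigned long long)fixed);
	return 0;
}

With the one-line change in the hunks below, min_vruntime is read before
update_min_vruntime() touches it, so the normalization stays relative to the
queue the task actually ran on.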
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e27ee..8255611c442 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1045,6 +1045,8 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
+ u64 min_vruntime;
+
/*
* Update run-time statistics of the 'current'.
*/
@@ -1071,6 +1073,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->on_rq = 0;
update_cfs_load(cfs_rq, 0);
account_entity_dequeue(cfs_rq, se);
+
+ min_vruntime = cfs_rq->min_vruntime;
update_min_vruntime(cfs_rq);
update_cfs_shares(cfs_rq);
@@ -1080,7 +1084,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* movement in our normalized position.
*/
if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
+ se->vruntime -= min_vruntime;
}
/*