sched/fair: Fix PELT integrity for new groups
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Fri, 17 Jun 2016 09:20:46 +0000 (11:20 +0200)
Commit:     mobile promotions <svcmobile_promotions@nvidia.com>
CommitDate: Mon, 15 Aug 2016 21:05:21 +0000 (14:05 -0700)
Vincent reported that when a new task is moved into a new cgroup it
gets attached twice to the load tracking:

  sched_move_task()
    task_move_group_fair()
      detach_task_cfs_rq()
      set_task_rq()
      attach_task_cfs_rq()
        attach_entity_load_avg()
          se->avg.last_load_update = cfs_rq->avg.last_load_update; // == 0

  enqueue_entity()
    enqueue_entity_load_avg()
      update_cfs_rq_load_avg()
        now = clock()
        __update_load_avg(&cfs_rq->avg)
          cfs_rq->avg.last_load_update = now
          // ages load/util for: now - 0, load/util -> 0
      if (migrated)
        attach_entity_load_avg()
          se->avg.last_load_update = cfs_rq->avg.last_load_update; // now != 0
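
Why the zero stamp matters: in this kernel, enqueue_entity_load_avg()
treats a zero se->avg.last_update_time as "entity was just migrated"
and attaches it again, and PELT ages a load value over the entire
window since the last stamp. A minimal stand-alone sketch of that
aging effect follows; the halve-every-32-periods helper and constants
are crude illustrative stand-ins for the kernel's decay_load(), not
the real implementation:

  #include <stdio.h>
  #include <stdint.h>

  #define PELT_PERIOD_NS (1024 * 1024)    /* one PELT period ~= 1ms */

  /*
   * Crude stand-in for the kernel's decay_load(): the real code scales
   * by y^n with y^32 == 1/2; halving once per 32 periods is enough to
   * show what aging across a huge window does.
   */
  static uint64_t decay_load(uint64_t val, uint64_t n_periods)
  {
          uint64_t halvings = n_periods / 32;

          return halvings >= 64 ? 0 : val >> halvings;
  }

  int main(void)
  {
          uint64_t now = 10ULL * 1000 * 1000 * 1000; /* 10s after boot, ns */
          uint64_t last_update_time = 0;             /* the bogus stamp */
          uint64_t periods = (now - last_update_time) / PELT_PERIOD_NS;

          /* ~9500 periods -> ~298 halvings: the attached load is gone */
          printf("aged load: %llu\n",
                 (unsigned long long)decay_load(1024, periods));
          return 0;
  }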

The problem is that we don't update cfs_rq load_avg before all
entity attach/detach operations. Only enqueue_task() and migrate_task()
do this.

By fixing this, the above will not happen, because the
sched_move_task() attach will have updated cfs_rq's last_load_update
time before attach, and in turn the attach will have set the entity's
last_load_update stamp.
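
In outline, the invariant the patch establishes (a sketch reusing the
helpers visible in the hunks below, not a verbatim copy of them):

  /*
   * Sketch: sync the cfs_rq average to 'now' *before* any entity
   * attach/detach, so the entity copies a current, non-zero stamp.
   */
  static void attach_task_cfs_rq_sketch(struct cfs_rq *cfs_rq,
                                        struct sched_entity *se)
  {
          u64 now = cfs_rq_clock_task(cfs_rq);

          /* ages cfs_rq->avg and stamps it with 'now' */
          update_cfs_rq_load_avg(now, cfs_rq, false);
          /* se inherits that fresh stamp instead of 0 */
          attach_entity_load_avg(cfs_rq, se);
  }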

Note that there is a further problem with sched_move_task() calling
detach on a task that hasn't yet been attached; this will be taken
care of in a subsequent patch.

Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yuyang Du <yuyang.du@intel.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Conflicts:
	kernel/sched/fair.c

(cherry picked from upstream commit 010114739d294c474764c94196d32fb92e233657)

Change-Id: Ib8a60c6096d3b21b63b0937af111325d10ab7500
Signed-off-by: Sai Gurrappadi <sgurrappadi@nvidia.com>
Reviewed-on: http://git-master/r/1195368
GVS: Gerrit_Virtual_Submit
Reviewed-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-by: Matthew Longnecker <mlongnecker@nvidia.com>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9f138bb73cc69e4a718b44e28b003dbffb5d52fb..c6328c89d264b2404f81f1b4209f7bd017a14b10 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3093,6 +3093,11 @@ static bool task_fits_rq(struct task_struct *p, struct rq *rq)
 }
 #else /* CONFIG_SMP */
 
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+       return 0;
+}
 static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
@@ -8456,6 +8461,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
 
        if (!vruntime_normalized(p)) {
                /*
@@ -8467,6 +8473,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
        }
 
        /* Catch up with the cfs_rq and remove our load when we leave */
+       update_cfs_rq_load_avg(now, cfs_rq, false);
        detach_entity_load_avg(cfs_rq, se);
 }
 
@@ -8474,6 +8481,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /*
@@ -8484,6 +8492,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
 #endif
 
        /* Synchronize task with its cfs_rq */
+       update_cfs_rq_load_avg(now, cfs_rq, false);
        attach_entity_load_avg(cfs_rq, se);
 
        if (!vruntime_normalized(p))
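
With the patch applied, the move/enqueue sequence from the changelog
becomes (a reconstructed trace in the notation used above, not part of
the original commit message):

  sched_move_task()
    task_move_group_fair()
      detach_task_cfs_rq()
        update_cfs_rq_load_avg()   // ages the old cfs_rq up to now
        detach_entity_load_avg()
      set_task_rq()
      attach_task_cfs_rq()
        update_cfs_rq_load_avg()   // new cfs_rq: last_load_update 0 -> now
        attach_entity_load_avg()
          se->avg.last_load_update = cfs_rq->avg.last_load_update; // == now

  enqueue_entity()
    enqueue_entity_load_avg()
      // se's stamp is already non-zero, so the entity no longer looks
      // freshly migrated and is not attached a second time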