sched: Move sched_avg_update() to update_cpu_load()
author	Suresh Siddha <suresh.b.siddha@intel.com>
	Thu, 10 Feb 2011 08:52:07 +0000 (09:52 +0100)
committer	AK <andi@firstfloor.org>
	Thu, 31 Mar 2011 18:57:57 +0000 (11:57 -0700)
commit da2b71edd8a7db44fe1746261410a981f3e03632 upstream.

Currently sched_avg_update() (which updates the rt_avg stats in the rq)
is called from scale_rt_power() (in the load balance context), which
doesn't take rq->lock.

Fix it by moving sched_avg_update() to the more appropriate
update_cpu_load(), which runs with rq->lock held and is where the CFS
load gets updated as well.
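For reference, the helper being moved looks roughly like this in
kernels of this era (a sketch, not part of this patch); the unlocked
read-modify-write of rq->age_stamp and rq->rt_avg is what makes
calling it without rq->lock racy:

	static void sched_avg_update(struct rq *rq)
	{
		s64 period = sched_avg_period();

		/* Fold whole elapsed periods into the decaying rt_avg. */
		while ((s64)(rq->clock - rq->age_stamp) > period) {
			/* Non-atomic RMW of rq fields: needs rq->lock. */
			rq->age_stamp += period;
			rq->rt_avg /= 2;
		}
	}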

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
LKML-Reference: <1282596171.2694.3.camel@sbsiddha-MOBL3>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
kernel/sched.c
kernel/sched_fair.c

diff --git a/kernel/sched.c b/kernel/sched.c
index d4f71b01136c10d3dd82b15b9d27a1d7b949f189..1747071e26f133ae676d39e9699b468600bddb92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1270,6 +1270,10 @@ static void resched_task(struct task_struct *p)
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
+
+static void sched_avg_update(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
@@ -3152,6 +3156,8 @@ static void update_cpu_load(struct rq *this_rq)
        }
 
        calc_load_account_active(this_rq);
+
+       sched_avg_update(this_rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a878b5332daad5d7db16625f298a4e963edac909..44d7bdc800f83e9d15185205b702cf4b402caff0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2268,8 +2268,6 @@ unsigned long scale_rt_power(int cpu)
        struct rq *rq = cpu_rq(cpu);
        u64 total, available;
 
-       sched_avg_update(rq);
-
        total = sched_avg_period() + (rq->clock - rq->age_stamp);
        available = total - rq->rt_avg;