Merge branch '4.0.8-rt6'
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 486d00c408b0bd610323fa15a9ae57bd76d689b1..473801eefab81f4f8aef61278f2c813ff25ac3d8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
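Every hunk below makes the same substitution: the fair scheduling class requests a reschedule with resched_curr_lazy() instead of resched_curr(). This is part of the PREEMPT_RT "lazy preemption" work merged via the 4.0.8-rt6 branch. As a rough sketch only, and assuming the CONFIG_PREEMPT_LAZY option and TIF_NEED_RESCHED_LAZY flag introduced by the RT patch set, the lazy helper looks something like the following (an approximation for illustration, not the exact code carried in this tree):

/*
 * Sketch of the lazy reschedule helper from the PREEMPT_RT patch set.
 * Illustration under the assumptions named above, not a verbatim copy.
 */
#ifdef CONFIG_PREEMPT_LAZY
void resched_curr_lazy(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;		/* a hard reschedule is already pending */

	if (test_tsk_thread_flag(curr, TIF_NEED_RESCHED_LAZY))
		return;		/* lazy request already recorded */

	/*
	 * Only mark the task; no IPI and no forced context switch here.
	 * The flag is honoured at the next preemption point (for example
	 * on return to user space), where it is treated like NEED_RESCHED.
	 */
	set_tsk_thread_flag(curr, TIF_NEED_RESCHED_LAZY);
}
#else
/* Without lazy preemption the lazy call falls back to resched_curr(). */
#define resched_curr_lazy(rq)	resched_curr(rq)
#endif

The intent is that a SCHED_OTHER task which has used up its slice, or is being preempted by another waking fair task, is only flagged and switched out at the next natural preemption point rather than immediately; RT-class tasks keep calling resched_curr() and still preempt right away.
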
@@ -3139,7 +3139,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
@@ -3163,7 +3163,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (delta > ideal_runtime)
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static void
@@ -3303,7 +3303,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * validating it and just reschedule.
         */
        if (queued) {
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
                return;
        }
        /*
@@ -3494,7 +3494,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
         * hierarchy can be throttled
         */
        if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -4117,7 +4117,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
                if (delta < 0) {
                        if (rq->curr == p)
-                               resched_curr(rq);
+                               resched_curr_lazy(rq);
                        return;
                }
                hrtick_start(rq, delta);
@@ -4981,7 +4981,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        return;
 
 preempt:
-       resched_curr(rq);
+       resched_curr_lazy(rq);
        /*
         * Only set the backward buddy when the current task is still
         * on the rq. This can happen when a wakeup gets interleaved
@@ -7767,7 +7767,7 @@ static void task_fork_fair(struct task_struct *p)
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
-               resched_curr(rq);
+               resched_curr_lazy(rq);
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
@@ -7792,7 +7792,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
         */
        if (rq->curr == p) {
                if (p->prio > oldprio)
-                       resched_curr(rq);
+                       resched_curr_lazy(rq);
        } else
                check_preempt_curr(rq, p, 0);
 }