]> rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/blob - rt-patches/0091-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
WAR:media:i2c:ov5693: add flip and mirror setting
[hercules2020/nv-tegra/linux-4.4.git] / rt-patches / 0091-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
1 From 2dfa3f09f640789ea273d76c7b211b2b1fd5f47b Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Sat, 25 Jun 2011 09:21:04 +0200
4 Subject: [PATCH 091/365] sched: Add saved_state for tasks blocked on sleeping
5  locks
6
7 Spinlocks are state preserving in !RT. RT changes the state when a
8 task gets blocked on a lock. So we need to remember the state before
9 the lock contention. If a regular wakeup (not an rtmutex-related
10 wakeup) happens, the saved_state is updated to running. When the lock
11 sleep is done, the saved state is restored.
12
13 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
14 ---
15  include/linux/sched.h |  2 ++
16  kernel/sched/core.c   | 31 ++++++++++++++++++++++++++++++-
17  kernel/sched/sched.h  |  1 +
18  3 files changed, 33 insertions(+), 1 deletion(-)
19
20 diff --git a/include/linux/sched.h b/include/linux/sched.h
21 index 5925cc4..0295180 100644
22 --- a/include/linux/sched.h
23 +++ b/include/linux/sched.h
24 @@ -1390,6 +1390,7 @@ struct tlbflush_unmap_batch {
25  
26  struct task_struct {
27         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
28 +       volatile long saved_state;      /* saved state for "spinlock sleepers" */
29         void *stack;
30         atomic_t usage;
31         unsigned int flags;     /* per process flags, defined below */
32 @@ -2495,6 +2496,7 @@ extern void xtime_update(unsigned long ticks);
33  
34  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
35  extern int wake_up_process(struct task_struct *tsk);
36 +extern int wake_up_lock_sleeper(struct task_struct * tsk);
37  extern void wake_up_new_task(struct task_struct *tsk);
38  #ifdef CONFIG_SMP
39   extern void kick_process(struct task_struct *tsk);
40 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
41 index c0e6f2b..e6b63f0 100644
42 --- a/kernel/sched/core.c
43 +++ b/kernel/sched/core.c
44 @@ -1962,8 +1962,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
45          */
46         smp_mb__before_spinlock();
47         raw_spin_lock_irqsave(&p->pi_lock, flags);
48 -       if (!(p->state & state))
49 +       if (!(p->state & state)) {
50 +               /*
51 +                * The task might be running due to a spinlock sleeper
52 +                * wakeup. Check the saved state and set it to running
53 +                * if the wakeup condition is true.
54 +                */
55 +               if (!(wake_flags & WF_LOCK_SLEEPER)) {
56 +                       if (p->saved_state & state)
57 +                               p->saved_state = TASK_RUNNING;
58 +               }
59                 goto out;
60 +       }
61 +
62 +       /*
63 +        * If this is a regular wakeup, then we can unconditionally
64 +        * clear the saved state of a "lock sleeper".
65 +        */
66 +       if (!(wake_flags & WF_LOCK_SLEEPER))
67 +               p->saved_state = TASK_RUNNING;
68  
69         trace_sched_waking(p);
70  
71 @@ -2118,6 +2135,18 @@ int wake_up_process(struct task_struct *p)
72  }
73  EXPORT_SYMBOL(wake_up_process);
74  
75 +/**
76 + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
77 + * @p: The process to be woken up.
78 + *
79 + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
80 + * the nature of the wakeup.
81 + */
82 +int wake_up_lock_sleeper(struct task_struct *p)
83 +{
84 +       return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
85 +}
86 +
87  int wake_up_state(struct task_struct *p, unsigned int state)
88  {
89         return try_to_wake_up(p, state, 0);
90 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
91 index b654396..7012cde 100644
92 --- a/kernel/sched/sched.h
93 +++ b/kernel/sched/sched.h
94 @@ -1111,6 +1111,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
95  #define WF_SYNC                0x01            /* waker goes to sleep after wakeup */
96  #define WF_FORK                0x02            /* child wakeup after fork */
97  #define WF_MIGRATED    0x4             /* internal use, task got migrated */
98 +#define WF_LOCK_SLEEPER        0x08            /* wakeup spinlock "sleeper" */
99  
100  /*
101   * To aid in avoiding the subversion of "niceness" due to uneven distribution
102 -- 
103 2.7.4
104