From 87495fd5e4eae1f98aafeb38074489c503a6717f Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Sep 2016 19:18:47 +0200
Subject: [PATCH 359/366] x86/preempt-lazy: fixup should_resched()

should_resched() returns true if NEED_RESCHED is set and the
preempt_count is 0 _or_ if NEED_RESCHED_LAZY is set, ignoring the
preempt counter. Ignoring the preempt counter is wrong; this patch
takes it into account.
While at it, __preempt_count_dec_and_test() ignores preempt_lazy_count
while checking TIF_NEED_RESCHED_LAZY, so we add this check, too.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/include/asm/preempt.h | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

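As a quick illustration of the check order this patch establishes, here is a
minimal userspace C model of should_resched() under CONFIG_PREEMPT_LAZY. It is
a sketch, not kernel code: the globals and the name should_resched_model() are
stand-ins invented for illustration; only the masking and ordering mirror the
hunk below, and PREEMPT_NEED_RESCHED is assumed to be the inverted 0x80000000
bit as in the x86 header.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* inverted NEED_RESCHED bit, as on x86 */

/* Stand-ins for per-CPU / per-thread kernel state (illustrative only). */
static uint32_t preempt_count_model;		/* models raw_cpu_read_4(__preempt_count) */
static uint32_t preempt_lazy_count_model;	/* models current_thread_info()->preempt_lazy_count */
static bool need_resched_lazy_model;		/* models test_thread_flag(TIF_NEED_RESCHED_LAZY) */

static bool should_resched_model(uint32_t preempt_offset)
{
	uint32_t tmp = preempt_count_model;

	if (tmp == preempt_offset)		/* NEED_RESCHED pending and count == offset */
		return true;

	tmp &= ~PREEMPT_NEED_RESCHED;		/* is the real preempt counter 0? */
	if (tmp)
		return false;			/* preemption disabled, never reschedule */
	if (preempt_lazy_count_model)
		return false;			/* lazy preemption disabled, honour it too */
	return need_resched_lazy_model;		/* only now does NEED_RESCHED_LAZY matter */
}

int main(void)
{
	/* Lazy resched requested while the counter is 1: must stay false. */
	preempt_count_model = PREEMPT_NEED_RESCHED | 1;
	need_resched_lazy_model = true;
	printf("%d\n", should_resched_model(0));	/* 0 */

	/* Counter back to 0, lazy flag still set: now true. */
	preempt_count_model = PREEMPT_NEED_RESCHED;
	printf("%d\n", should_resched_model(0));	/* 1 */
	return 0;
}
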
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 5dbd2d0..6f432ad 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -89,6 +89,8 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	if (____preempt_count_dec_and_test())
 		return true;
 #ifdef CONFIG_PREEMPT_LAZY
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
 	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
 #else
 	return false;
@@ -101,8 +103,19 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 static __always_inline bool should_resched(int preempt_offset)
 {
 #ifdef CONFIG_PREEMPT_LAZY
-	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
-			test_thread_flag(TIF_NEED_RESCHED_LAZY));
+	u32 tmp;
+
+	tmp = raw_cpu_read_4(__preempt_count);
+	if (tmp == preempt_offset)
+		return true;
+
+	/* preempt count == 0 ? */
+	tmp &= ~PREEMPT_NEED_RESCHED;
+	if (tmp)
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
 #else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 #endif
-- 
1.9.1
