Apply preempt_rt patch-4.9-rt1.patch.xz
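
This hunk teaches the x86 preempt_count helpers about lazy preemption: the raw "decl"-based test keeps its old semantics under the new name ____preempt_count_dec_and_test(), while the __preempt_count_dec_and_test() wrapper and should_resched() additionally report a pending reschedule when TIF_NEED_RESCHED_LAZY is set, provided the task holds no preempt_lazy_count (and, for should_resched(), the remaining preempt count is zero). Only CONFIG_PREEMPT_LAZY builds take the extra path.

For context, a minimal sketch of how these helpers are reached from the generic code. This is based on include/linux/preempt.h and kernel/sched/core.c around v4.9, is not part of this diff, and is simplified (the DEBUG_PREEMPT variants are omitted):

	/* include/linux/preempt.h (simplified): preempt_enable() relies on the
	 * per-arch test to decide whether to enter the scheduler. */
	#define preempt_count_dec_and_test()	__preempt_count_dec_and_test()

	#define preempt_enable()					\
	do {								\
		barrier();						\
		if (unlikely(preempt_count_dec_and_test()))		\
			__preempt_schedule();				\
	} while (0)

	/* kernel/sched/core.c (simplified): cond_resched() asks should_resched()
	 * whether the preempt count is at the expected offset and a reschedule
	 * is pending. */
	int __sched _cond_resched(void)
	{
		if (should_resched(0)) {
			preempt_schedule_common();
			return 1;
		}
		return 0;
	}

With CONFIG_PREEMPT_LAZY, both call sites above now also fire for TIF_NEED_RESCHED_LAZY unless the task has taken a lazy-preempt reference (preempt_lazy_count != 0).
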
[zynq/linux.git] / arch/x86/include/asm/preempt.h
index 17f2186457012eb77fcb2f6ca2183d1b0a062384..11bd1b7ee6eb75ecba4df54fb9ebf72f24b07f82 100644 (file)
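
A note on the encoding the hunk relies on (values taken from include/linux/preempt.h and this header around v4.9, stated here as an aid, not part of the diff): x86 folds PREEMPT_NEED_RESCHED (0x80000000) into the per-cpu __preempt_count as an inverted bit, i.e. the bit is cleared when a reschedule is needed:

	count = 1, no resched pending   ->  __preempt_count = 0x80000001
	count = 0, resched pending      ->  __preempt_count = 0x00000000

That is why a single GEN_UNARY_RMWcc "decl" hitting zero means "last preempt reference dropped and NEED_RESCHED set", why should_resched() can compare the raw value against preempt_offset, and why the lazy path first masks the bit out (tmp &= ~PREEMPT_NEED_RESCHED) before testing whether the remaining count is zero.
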
@@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
 {
        GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
 }
 
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+       if (____preempt_count_dec_and_test())
+               return true;
+#ifdef CONFIG_PREEMPT_LAZY
+       if (current_thread_info()->preempt_lazy_count)
+               return false;
+       return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+       return false;
+#endif
+}
+
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+       u32 tmp;
+
+       tmp = raw_cpu_read_4(__preempt_count);
+       if (tmp == preempt_offset)
+               return true;
+
+       /* preempt count == 0 ? */
+       tmp &= ~PREEMPT_NEED_RESCHED;
+       if (tmp)
+               return false;
+       if (current_thread_info()->preempt_lazy_count)
+               return false;
+       return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
        return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
 }
 
 #ifdef CONFIG_PREEMPT