Apply preempt_rt patch-4.9-rt1.patch.xz
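
On PREEMPT_RT_FULL, kmap_atomic() no longer disables preemption, so a task can be preempted while it holds atomic kmaps. The kmap fixmap slots thereby become per-task state that __switch_to() has to tear down for the outgoing task and re-install for the incoming one; that is what the new switch_kmaps() below does. The code relies on per-task fields introduced elsewhere in the same patch -- roughly the following, a sketch of the include/linux/sched.h hunk rather than a quotation:

/* include/linux/sched.h (same patch), inside struct task_struct -- sketch */
#ifdef CONFIG_PREEMPT_RT_FULL
	int	kmap_idx;		/* number of kmap_atomic slots in use */
	pte_t	kmap_pte[KM_TYPE_NR];	/* saved pte for each live slot */
#endif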
index bd7be8efdc4ce7ae49f155b7f701f6d6d800293f..b3b0a7f7b1ca27fb0fbca864a431fb9a77a0a1bf 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
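
The new <linux/highmem.h> include is what makes kmap_pte, kpte_clear_flush() and KM_TYPE_NR visible here (on 32-bit highmem configs it pulls in <asm/highmem.h> and <asm/kmap_types.h>). For orientation, the relevant declarations look roughly like this -- an abridged sketch of the v4.9 headers, not a quotation:

/* <asm/highmem.h> / <asm-generic/kmap_types.h>, abridged -- sketch */
extern pte_t *kmap_pte;			/* first pte of the kmap_atomic fixmap area */

#define KM_TYPE_NR 20			/* kmap_atomic slots per CPU (more on debug configs) */

#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)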
@@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+       int i;
+
+       /*
+        * Clear @prev_p's kmap_atomic mappings
+        */
+       for (i = 0; i < prev_p->kmap_idx; i++) {
+               int idx = i + KM_TYPE_NR * smp_processor_id();
+               pte_t *ptep = kmap_pte - idx;
+
+               kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       }
+       /*
+        * Restore @next_p's kmap_atomic mappings
+        */
+       for (i = 0; i < next_p->kmap_idx; i++) {
+               int idx = i + KM_TYPE_NR * smp_processor_id();
+
+               if (!pte_none(next_p->kmap_pte[i]))
+                       set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+       }
+}
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 
 /*
  *     switch_to(x,y) should switch tasks from x to y.
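
switch_kmaps() walks the kmap_idx slots that are still live: it flushes the outgoing task's fixmap entries and re-installs the incoming task's saved ptes at the slot positions of the current CPU -- recomputed via smp_processor_id(), since the task may resume on a different CPU than the one where the mappings were created. The kmap_idx counter and the kmap_pte[] array are filled in on the kmap_atomic() side; the same patch reworks kmap_atomic_idx_push() so that, on RT, the slot index lives in the task instead of a per-CPU counter. Roughly (an abridged sketch of the include/linux/highmem.h hunk, not a quotation):

/* include/linux/highmem.h (same patch), abridged -- sketch */
static inline int kmap_atomic_idx_push(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

	BUG_ON(idx >= KM_TYPE_NR);
	return idx;
#else
	current->kmap_idx++;
	BUG_ON(current->kmap_idx > KM_TYPE_NR);
	return current->kmap_idx - 1;
#endif
}

The arch-side kmap_atomic_prot() is likewise patched to stash the new pte via current->kmap_pte[type] = pte before calling set_pte(), which is exactly what switch_kmaps() restores from.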
@@ -271,6 +301,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);
 
+       switch_kmaps(prev_p, next_p);
+
        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so