Apply preempt_rt patch-4.9-rt1.patch.xz
[zynq/linux.git] / arch / x86 / mm / highmem_32.c
index 6d18b70ed5a9bb808f2c976450671e7034e3169c..f752724c22e8a413cfdbd30f4bde215139074024 100644
@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
  */
 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
+       pte_t pte = mk_pte(page, prot);
        unsigned long vaddr;
        int idx, type;
 
-       preempt_disable();
+       preempt_disable_nort();
        pagefault_disable();
 
        if (!PageHighMem(page))
@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
-       set_pte(kmap_pte-idx, mk_pte(page, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+       current->kmap_pte[type] = pte;
+#endif
+       set_pte(kmap_pte-idx, pte);
        arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
                 * is a bad idea also, in case the page changes cacheability
                 * attributes or becomes a protected page in a hypervisor.
                 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+               current->kmap_pte[type] = __pte(0);
+#endif
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
                arch_flush_lazy_mmu_mode();
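
Why the pte is cached in task_struct: on mainline, kmap_atomic() runs with preemption disabled, so a fixmap slot can never be observed half-switched. With preempt_disable_nort() compiling away on RT, a task can now be preempted while it holds an atomic kmap, so the patch moves the mapping state into the task and has the scheduler replay it on switch-in. Below is a minimal sketch of that hook, modelled on the switch_kmaps() helper the same RT series adds to arch/x86/kernel/process_32.c; the kmap_pte[KM_TYPE_NR] array and kmap_idx depth counter in task_struct come from other hunks of the patch and are assumed here, and the exact form is reconstructed rather than quoted:

#ifdef CONFIG_PREEMPT_RT_FULL
static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
{
	int i;

	/* Flush the outgoing task's atomic kmaps from this CPU's fixmap slots. */
	for (i = 0; i < prev_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		kpte_clear_flush(kmap_pte - idx, __fix_to_virt(FIX_KMAP_BEGIN + idx));
	}

	/*
	 * Replay the incoming task's saved ptes; entries cleared by
	 * __kunmap_atomic() read as pte_none() and are skipped.
	 */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		if (!pte_none(next_p->kmap_pte[i]))
			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
	}
}
#endif

This is also why the __kunmap_atomic() hunk above writes __pte(0) into the cached slot: a released mapping must read as pte_none() so the switch-in path never reinstalls it.
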
@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
        pagefault_enable();
-       preempt_enable();
+       preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
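
The _nort ("not on RT") variants used above behave like the plain primitives on a mainline kernel but become no-ops under PREEMPT_RT_FULL, where keeping this window preemptible is the whole point of the change. Sketched from the definitions the RT series adds to include/linux/preempt.h (reproduced from memory of the 4.9-rt patch, so treat the exact form as an assumption):

#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_nort()		barrier()
# define preempt_enable_nort()		barrier()
#else
# define preempt_disable_nort()		preempt_disable()
# define preempt_enable_nort()		preempt_enable()
#endif

Note that pagefault_disable() stays unconditional in both kmap_atomic_prot() and __kunmap_atomic(), so page faults still take the atomic (non-sleeping) path inside the mapping even when preemption remains enabled on RT.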