mm, rt: kmap_atomic scheduling
author    Peter Zijlstra <peterz@infradead.org>
          Thu, 28 Jul 2011 08:43:51 +0000 (10:43 +0200)
committer Michal Sojka <sojka@merica.cz>
          Sun, 13 Sep 2015 07:47:36 +0000 (09:47 +0200)
In fact, with migrate_disable() in place one could play games with
kmap_atomic. You could save/restore the kmap_atomic slots on context
switch (if there are any in use, of course); this should be especially
easy now that we have a kmap_atomic stack.

Something like the below... it wants all the preempt_disable() stuff
replaced with pagefault_disable() && migrate_disable() of course, but
then you can flip kmaps around like below.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
[dvhart@linux.intel.com: build fix]
Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
[tglx@linutronix.de: Get rid of the per cpu variable and store the idx
     and the pte content right away in the task struct.
     Shortens the context switch code. ]
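
For illustration only (not part of the patch), a minimal sketch of a
typical kmap_atomic user; the helper name copy_one_page() is made up.
With this change applied, the mapped section may be preempted on
PREEMPT_RT_FULL: the per-task kmap_pte[]/kmap_idx state lets
__switch_to() tear down and re-instate the fixmap slots, while the
migrate_disable() taken in pagefault_disable() keeps the task on the
CPU whose fixmap slots it is using.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Copy one (possibly highmem) page to another. */
	static void copy_one_page(struct page *dst, struct page *src)
	{
		void *vdst = kmap_atomic(dst);	/* pagefault_disable() + migrate_disable() on RT */
		void *vsrc = kmap_atomic(src);

		memcpy(vdst, vsrc, PAGE_SIZE);	/* may be preempted on RT, but not migrated */

		kunmap_atomic(vsrc);		/* unmap in reverse (stack) order */
		kunmap_atomic(vdst);
	}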

arch/x86/kernel/process_32.c
arch/x86/mm/highmem_32.c
arch/x86/mm/iomap_32.c
include/linux/highmem.h
include/linux/sched.h
include/linux/uaccess.h
mm/highmem.c

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 603c4f99cb5a17f83e65c3066a5642a4bb9d0f42..ae3ac253f181cd1cf51cd18a702378504602bbc9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
@@ -214,6 +215,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+       int i;
+
+       /*
+        * Clear @prev's kmap_atomic mappings
+        */
+       for (i = 0; i < prev_p->kmap_idx; i++) {
+               int idx = i + KM_TYPE_NR * smp_processor_id();
+               pte_t *ptep = kmap_pte - idx;
+
+               kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       }
+       /*
+        * Restore @next_p's kmap_atomic mappings
+        */
+       for (i = 0; i < next_p->kmap_idx; i++) {
+               int idx = i + KM_TYPE_NR * smp_processor_id();
+
+               if (!pte_none(next_p->kmap_pte[i]))
+                       set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+       }
+}
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 
 /*
  *     switch_to(x,y) should switch tasks from x to y.
@@ -301,6 +331,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);
 
+       switch_kmaps(prev_p, next_p);
+
        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index eecb207a2037080f9f5d74c36c300b217a4f7a82..0d1cbcf47f8064274409e2a3ad6914dab48021a8 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
  */
 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
+       pte_t pte = mk_pte(page, prot);
        unsigned long vaddr;
        int idx, type;
 
-       preempt_disable();
+       preempt_disable_nort();
        pagefault_disable();
 
        if (!PageHighMem(page))
@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
-       set_pte(kmap_pte-idx, mk_pte(page, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+       current->kmap_pte[type] = pte;
+#endif
+       set_pte(kmap_pte-idx, pte);
        arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
                 * is a bad idea also, in case the page changes cacheability
                 * attributes or becomes a protected page in a hypervisor.
                 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+               current->kmap_pte[type] = __pte(0);
+#endif
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
                arch_flush_lazy_mmu_mode();
@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
        pagefault_enable();
-       preempt_enable();
+       preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 2b7ece0e103aec9e501c6018f00aa6113e6999f5..63ced61497451c34f89f13b48b22fc48279ef456 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
 
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
+       pte_t pte = pfn_pte(pfn, prot);
        unsigned long vaddr;
        int idx, type;
 
@@ -65,7 +66,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+       current->kmap_pte[type] = pte;
+#endif
+       set_pte(kmap_pte - idx, pte);
        arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
@@ -113,6 +117,9 @@ iounmap_atomic(void __iomem *kvaddr)
                 * is a bad idea also, in case the page changes cacheability
                 * attributes or becomes a protected page in a hypervisor.
                 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+               current->kmap_pte[type] = __pte(0);
+#endif
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
        }
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6aefcd0031a6bd013cf322d1021812fcf84250c2..0b0079fca9a77df655b67963e9694dd15a2de610 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -87,32 +87,51 @@ static inline void __kunmap_atomic(void *addr)
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
+#endif
 
 static inline int kmap_atomic_idx_push(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
        int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
 
-#ifdef CONFIG_DEBUG_HIGHMEM
+# ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx >= KM_TYPE_NR);
-#endif
+# endif
        return idx;
+#else
+       current->kmap_idx++;
+       BUG_ON(current->kmap_idx > KM_TYPE_NR);
+       return current->kmap_idx - 1;
+#endif
 }
 
 static inline int kmap_atomic_idx(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
        return __this_cpu_read(__kmap_atomic_idx) - 1;
+#else
+       return current->kmap_idx - 1;
+#endif
 }
 
 static inline void kmap_atomic_idx_pop(void)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
+#ifndef CONFIG_PREEMPT_RT_FULL
+# ifdef CONFIG_DEBUG_HIGHMEM
        int idx = __this_cpu_dec_return(__kmap_atomic_idx);
 
        BUG_ON(idx < 0);
-#else
+# else
        __this_cpu_dec(__kmap_atomic_idx);
+# endif
+#else
+       current->kmap_idx--;
+# ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(current->kmap_idx < 0);
+# endif
 #endif
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4032139fd6894e838f366e59b39fc5d327ce87d2..887257c05f15626653359aab945b5b989dd2864c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -26,6 +26,7 @@ struct sched_param {
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
 #include <linux/preempt_mask.h>
+#include <asm/kmap_types.h>
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -1789,6 +1790,12 @@ struct task_struct {
        struct rcu_head put_rcu;
        int softirq_nestcnt;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+       int kmap_idx;
+       pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long   task_state_change;
 #endif
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c1386073cce6c57807c47dc3d4694af0f03..941b2dab50cd398bbf47540a0e318f4c31d84cfa 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -24,6 +24,7 @@ static __always_inline void pagefault_disabled_dec(void)
  */
 static inline void pagefault_disable(void)
 {
+       migrate_disable();
        pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
@@ -40,6 +41,7 @@ static inline void pagefault_enable(void)
         */
        barrier();
        pagefault_disabled_dec();
+       migrate_enable();
 }
 
 /*
diff --git a/mm/highmem.c b/mm/highmem.c
index 123bcd3ed4f209ba3710d9bfcaf8725d0a105534..16e8cf26d38ae198ff3e1c2cbd07f03cdc297ef7 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
 #include <linux/kgdb.h>
 #include <asm/tlbflush.h>
 
-
+#ifndef CONFIG_PREEMPT_RT_FULL
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
+#endif
 
 /*
  * Virtual_count is not a pure "count".
@@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
-
+#ifndef CONFIG_PREEMPT_RT_FULL
 EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+#endif
 
 unsigned int nr_free_highpages (void)
 {