rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/blob - rt-patches/0204-arm-Enable-highmem-for-rt.patch
rt_patches: required rebase due to printk change
[hercules2020/nv-tegra/linux-4.4.git] / rt-patches / 0204-arm-Enable-highmem-for-rt.patch
1 From 93564d8227dfae7268b5cdd20827dcf321d38872 Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Wed, 13 Feb 2013 11:03:11 +0100
4 Subject: [PATCH 204/366] arm: Enable highmem for rt
5
6 fixup highmem for ARM.
7
8 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
9 ---
10  arch/arm/include/asm/switch_to.h |  8 +++++++
11  arch/arm/mm/highmem.c            | 45 +++++++++++++++++++++++++++++++++++-----
12  include/linux/highmem.h          |  1 +
13  3 files changed, 49 insertions(+), 5 deletions(-)
14
15 diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
16 index 12ebfcc..c962084 100644
17 --- a/arch/arm/include/asm/switch_to.h
18 +++ b/arch/arm/include/asm/switch_to.h
19 @@ -3,6 +3,13 @@
20  
21  #include <linux/thread_info.h>
22  
23 +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
24 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
25 +#else
26 +static inline void
27 +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
28 +#endif
29 +
30  /*
31   * For v7 SMP cores running a preemptible kernel we may be pre-empted
32   * during a TLB maintenance operation, so execute an inner-shareable dsb
33 @@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
34  #define switch_to(prev,next,last)                                      \
35  do {                                                                   \
36         __complete_pending_tlbi();                                      \
37 +       switch_kmaps(prev, next);                                       \
38         last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));        \
39  } while (0)
40  
41 diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
42 index eb4b225d..deabc36 100644
43 --- a/arch/arm/mm/highmem.c
44 +++ b/arch/arm/mm/highmem.c
45 @@ -54,12 +54,13 @@ EXPORT_SYMBOL(kunmap);
46  
47  void *kmap_atomic(struct page *page)
48  {
49 +       pte_t pte = mk_pte(page, kmap_prot);
50         unsigned int idx;
51         unsigned long vaddr;
52         void *kmap;
53         int type;
54  
55 -       preempt_disable();
56 +       preempt_disable_nort();
57         pagefault_disable();
58         if (!PageHighMem(page))
59                 return page_address(page);
60 @@ -93,7 +94,10 @@ void *kmap_atomic(struct page *page)
61          * in place, so the contained TLB flush ensures the TLB is updated
62          * with the new mapping.
63          */
64 -       set_fixmap_pte(idx, mk_pte(page, kmap_prot));
65 +#ifdef CONFIG_PREEMPT_RT_FULL
66 +       current->kmap_pte[type] = pte;
67 +#endif
68 +       set_fixmap_pte(idx, pte);
69  
70         return (void *)vaddr;
71  }
72 @@ -110,6 +114,9 @@ void __kunmap_atomic(void *kvaddr)
73  
74                 if (cache_is_vivt())
75                         __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
76 +#ifdef CONFIG_PREEMPT_RT_FULL
77 +               current->kmap_pte[type] = __pte(0);
78 +#endif
79  #ifdef CONFIG_DEBUG_HIGHMEM
80                 BUG_ON(vaddr != __fix_to_virt(idx));
81  #else
82 @@ -122,17 +129,18 @@ void __kunmap_atomic(void *kvaddr)
83                 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
84         }
85         pagefault_enable();
86 -       preempt_enable();
87 +       preempt_enable_nort();
88  }
89  EXPORT_SYMBOL(__kunmap_atomic);
90  
91  void *kmap_atomic_pfn(unsigned long pfn)
92  {
93 +       pte_t pte = pfn_pte(pfn, kmap_prot);
94         unsigned long vaddr;
95         int idx, type;
96         struct page *page = pfn_to_page(pfn);
97  
98 -       preempt_disable();
99 +       preempt_disable_nort();
100         pagefault_disable();
101         if (!PageHighMem(page))
102                 return page_address(page);
103 @@ -143,7 +151,34 @@ void *kmap_atomic_pfn(unsigned long pfn)
104  #ifdef CONFIG_DEBUG_HIGHMEM
105         BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
106  #endif
107 -       set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
108 +#ifdef CONFIG_PREEMPT_RT_FULL
109 +       current->kmap_pte[type] = pte;
110 +#endif
111 +       set_fixmap_pte(idx, pte);
112  
113         return (void *)vaddr;
114  }
115 +#if defined CONFIG_PREEMPT_RT_FULL
116 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
117 +{
118 +       int i;
119 +
120 +       /*
121 +        * Clear @prev's kmap_atomic mappings
122 +        */
123 +       for (i = 0; i < prev_p->kmap_idx; i++) {
124 +               int idx = i + KM_TYPE_NR * smp_processor_id();
125 +
126 +               set_fixmap_pte(idx, __pte(0));
127 +       }
128 +       /*
129 +        * Restore @next_p's kmap_atomic mappings
130 +        */
131 +       for (i = 0; i < next_p->kmap_idx; i++) {
132 +               int idx = i + KM_TYPE_NR * smp_processor_id();
133 +
134 +               if (!pte_none(next_p->kmap_pte[i]))
135 +                       set_fixmap_pte(idx, next_p->kmap_pte[i]);
136 +       }
137 +}
138 +#endif
139 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
140 index 2f56829..a2a50f5 100644
141 --- a/include/linux/highmem.h
142 +++ b/include/linux/highmem.h
143 @@ -7,6 +7,7 @@
144  #include <linux/mm.h>
145  #include <linux/uaccess.h>
146  #include <linux/hardirq.h>
147 +#include <linux/sched.h>
148  
149  #include <asm/cacheflush.h>
150  
151 -- 
152 1.9.1
153