1 From 93564d8227dfae7268b5cdd20827dcf321d38872 Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Wed, 13 Feb 2013 11:03:11 +0100
4 Subject: [PATCH 204/366] arm: Enable highmem for rt
8 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
10 arch/arm/include/asm/switch_to.h | 8 +++++++
11 arch/arm/mm/highmem.c | 45 +++++++++++++++++++++++++++++++++++-----
12 include/linux/highmem.h | 1 +
13 3 files changed, 49 insertions(+), 5 deletions(-)
15 diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
16 index 12ebfcc..c962084 100644
17 --- a/arch/arm/include/asm/switch_to.h
18 +++ b/arch/arm/include/asm/switch_to.h
21 #include <linux/thread_info.h>
23 +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
24 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
25 +#else
26 +static inline void
27 +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
28 +#endif
31 * For v7 SMP cores running a preemptible kernel we may be pre-empted
32 * during a TLB maintenance operation, so execute an inner-shareable dsb
33 @@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
34 #define switch_to(prev,next,last) \
36 __complete_pending_tlbi(); \
37 + switch_kmaps(prev, next); \
38 last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
41 diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
42 index eb4b225d..deabc36 100644
43 --- a/arch/arm/mm/highmem.c
44 +++ b/arch/arm/mm/highmem.c
45 @@ -54,12 +54,13 @@ EXPORT_SYMBOL(kunmap);
47 void *kmap_atomic(struct page *page)
49 + pte_t pte = mk_pte(page, kmap_prot);
56 + preempt_disable_nort();
58 if (!PageHighMem(page))
59 return page_address(page);
60 @@ -93,7 +94,10 @@ void *kmap_atomic(struct page *page)
61 * in place, so the contained TLB flush ensures the TLB is updated
62 * with the new mapping.
64 - set_fixmap_pte(idx, mk_pte(page, kmap_prot));
65 +#ifdef CONFIG_PREEMPT_RT_FULL
66 + current->kmap_pte[type] = pte;
67 +#endif
68 + set_fixmap_pte(idx, pte);
72 @@ -110,6 +114,9 @@ void __kunmap_atomic(void *kvaddr)
75 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
76 +#ifdef CONFIG_PREEMPT_RT_FULL
77 + current->kmap_pte[type] = __pte(0);
78 +#endif
79 #ifdef CONFIG_DEBUG_HIGHMEM
80 BUG_ON(vaddr != __fix_to_virt(idx));
82 @@ -122,17 +129,18 @@ void __kunmap_atomic(void *kvaddr)
83 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
87 + preempt_enable_nort();
89 EXPORT_SYMBOL(__kunmap_atomic);
91 void *kmap_atomic_pfn(unsigned long pfn)
93 + pte_t pte = pfn_pte(pfn, kmap_prot);
96 struct page *page = pfn_to_page(pfn);
99 + preempt_disable_nort();
101 if (!PageHighMem(page))
102 return page_address(page);
103 @@ -143,7 +151,34 @@ void *kmap_atomic_pfn(unsigned long pfn)
104 #ifdef CONFIG_DEBUG_HIGHMEM
105 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
107 - set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
108 +#ifdef CONFIG_PREEMPT_RT_FULL
109 + current->kmap_pte[type] = pte;
110 +#endif
111 + set_fixmap_pte(idx, pte);
113 return (void *)vaddr;
115 +#if defined CONFIG_PREEMPT_RT_FULL
116 +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
117 +{
118 + int i;
119 +
120 + /*
121 + * Clear @prev's kmap_atomic mappings
122 + */
123 + for (i = 0; i < prev_p->kmap_idx; i++) {
124 + int idx = i + KM_TYPE_NR * smp_processor_id();
125 +
126 + set_fixmap_pte(idx, __pte(0));
127 + }
128 + /*
129 + * Restore @next_p's kmap_atomic mappings
130 + */
131 + for (i = 0; i < next_p->kmap_idx; i++) {
132 + int idx = i + KM_TYPE_NR * smp_processor_id();
133 +
134 + if (!pte_none(next_p->kmap_pte[i]))
135 + set_fixmap_pte(idx, next_p->kmap_pte[i]);
136 + }
137 +}
138 +#endif
140 index 2f56829..a2a50f5 100644
141 --- a/include/linux/highmem.h
142 +++ b/include/linux/highmem.h
144 #include <linux/mm.h>
145 #include <linux/uaccess.h>
146 #include <linux/hardirq.h>
147 +#include <linux/sched.h>
149 #include <asm/cacheflush.h>