1 #ifndef __M68K_MMU_CONTEXT_H
2 #define __M68K_MMU_CONTEXT_H
4 #include <asm-generic/mm_hooks.h>
/*
 * Scheduler hook invoked when switching to a kernel thread (lazy TLB
 * mode).  NOTE(review): the body is not visible in this view; on m68k
 * this is presumably a no-op — confirm against the full file.
 */
6 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
13 #include <asm/setup.h>
15 #include <asm/pgalloc.h>
/*
 * Set up the MMU context for a new address space.  On 020/030/040/060
 * the "context" is simply the physical address of the page directory,
 * ready to be loaded into the CPU root pointer at switch time.
 */
17 static inline int init_new_context(struct task_struct *tsk,
20 mm->context = virt_to_phys(mm->pgd);
/* init_new_context() allocates nothing, so teardown is a no-op. */
24 #define destroy_context(mm) do { } while(0)
/*
 * Switch address space on a 68020 (with 68851 PMMU) or 68030: flush the
 * virtually-addressed caches, then load the CPU root pointer (CRP) with
 * this mm's page-directory physical address.
 */
26 static inline void switch_mm_0230(struct mm_struct *mm)
/* CRP descriptor: high word = limit/type bits, low word = table address. */
28 unsigned long crp[2] = {
29 0x80000000 | _PAGE_TABLE, mm->context
/* .chip 68030 lets gas accept 030-only insns; reset to 68k below. */
33 asm volatile (".chip 68030");
35 /* flush MC68030/MC68020 caches (they are virtually addressed) */
40 : "=d" (tmp) : "di" (FLUSH_I_AND_D));
42 /* Switch the root pointer. For a 030-only kernel,
43 * avoid flushing the whole ATC, we only need to
44 * flush the user entries. The 68851 does this by
45 * itself. Avoid a runtime check here.
/* Compile-time CPU selection: pick the 030-only sequence statically. */
48 #ifdef CPU_M68030_ONLY
56 asm volatile (".chip 68k");
/*
 * Switch address space on a 68040/68060: flush user ATC entries, load
 * the user root pointer (URP) with this mm's page-directory address,
 * then clear user entries from the branch cache.
 */
59 static inline void switch_mm_0460(struct mm_struct *mm)
61 asm volatile (".chip 68040");
63 /* flush address translation cache (user entries) */
64 asm volatile ("pflushan");
66 /* switch the root pointer */
67 asm volatile ("movec %0,%%urp" : : "r" (mm->context));
72 /* clear user entries in the branch cache */
/* NOTE(review): 0x00200000 is presumably the CACR branch-cache
 * clear-user bit (68060-specific) — confirm against the CPU manual. */
77 : "=d" (tmp): "di" (0x00200000));
80 asm volatile (".chip 68k");
/*
 * Context-switch entry point: dispatch to the CPU-family-specific
 * root-pointer switch (020/030 CRP vs 040/060 URP).
 */
83 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
86 if (CPU_IS_020_OR_030)
/* Nothing to do when an address space is deactivated. */
93 #define deactivate_mm(tsk,mm) do { } while (0)
/*
 * Activate a new address space (e.g. at exec time): refresh the cached
 * physical pgd address and load it into the MMU immediately.
 */
95 static inline void activate_mm(struct mm_struct *prev_mm,
96 struct mm_struct *next_mm)
98 next_mm->context = virt_to_phys(next_mm->pgd);
100 if (CPU_IS_020_OR_030)
101 switch_mm_0230(next_mm);
103 switch_mm_0460(next_mm);
106 #elif defined(CONFIG_SUN3)
107 #include <asm/sun3mmu.h>
108 #include <linux/sched.h>
/* Sun3 hardware-context allocator, implemented in the sun3 mm code:
 * hand out a free context number / release one for reuse. */
110 extern unsigned long get_free_context(struct mm_struct *mm);
111 extern void clear_context(unsigned long context);
113 /* set the context for a new task to unmapped */
114 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
/* The real hardware context is allocated lazily in get_mmu_context(). */
116 mm->context = SUN3_INVALID_CONTEXT;
120 /* find the context given to this process, and if it hasn't already
121 got one, go get one for it. */
122 static inline void get_mmu_context(struct mm_struct *mm)
/* Lazy allocation: only grab a hardware context on first use. */
124 if(mm->context == SUN3_INVALID_CONTEXT)
125 mm->context = get_free_context(mm);
128 /* flush context if allocated... */
129 static inline void destroy_context(struct mm_struct *mm)
/* Guard: mms that never ran still hold SUN3_INVALID_CONTEXT. */
131 if(mm->context != SUN3_INVALID_CONTEXT)
132 clear_context(mm->context);
/*
 * Make @mm's context current in the Sun3 MMU context register.
 * NOTE(review): a hidden line presumably calls get_mmu_context(mm)
 * first so the context is valid here — confirm in the full file.
 */
135 static inline void activate_context(struct mm_struct *mm)
138 sun3_put_context(mm->context);
/*
 * Sun3 context switch.  NOTE(review): this activates tsk->mm rather
 * than the @next argument — presumably identical at this point in the
 * scheduler, but worth confirming.
 */
141 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
143 activate_context(tsk->mm);
/* Nothing to do when an address space is deactivated. */
146 #define deactivate_mm(tsk,mm) do { } while (0)
/* Activate a new address space (exec): load its context into the MMU. */
148 static inline void activate_mm(struct mm_struct *prev_mm,
149 struct mm_struct *next_mm)
151 activate_context(next_mm);
154 #else /* CONFIG_COLDFIRE */
156 #include <asm/coldfire.h>
157 #include <asm/atomic.h>
158 #include <asm/bitops.h>
/* ColdFire ASID allocator state: 255 usable contexts (1..255), with
 * NO_CONTEXT (256, out of range) as the "not yet assigned" sentinel.
 * LAST_CONTEXT is also used as a power-of-two-minus-one wrap mask. */
161 #define NO_CONTEXT 256
162 #define LAST_CONTEXT 255
163 #define FIRST_CONTEXT 1
/* Program the hardware ASID + page-table root for @context. */
165 extern void set_context(mm_context_t context, pgd_t *pgd);
/* Bitmap of in-use context numbers and round-robin scan position. */
166 extern unsigned long context_map[];
167 extern mm_context_t next_mmu_context;
/* Count of free contexts; steal_context() reclaims one when exhausted. */
169 extern atomic_t nr_free_contexts;
170 extern struct mm_struct *context_mm[LAST_CONTEXT+1];
171 extern void steal_context(void);
/*
 * Ensure @mm owns a hardware context (ASID).  Fast path: already has
 * one.  Otherwise reserve a slot via the free-context counter (stealing
 * one if none are free), then scan the bitmap round-robin for a free
 * context number starting at next_mmu_context.
 */
173 static inline void get_mmu_context(struct mm_struct *mm)
177 if (mm->context != NO_CONTEXT)
/* Counter went negative: undo the decrement and (on a hidden line,
 * presumably) steal_context() before retrying — confirm in full file. */
179 while (atomic_dec_and_test_lt(&nr_free_contexts)) {
180 atomic_inc(&nr_free_contexts);
183 ctx = next_mmu_context;
/* test_and_set_bit claims the slot atomically; on contention, advance
 * to the next free bit, wrapping back to FIRST_CONTEXT past the end. */
184 while (test_and_set_bit(ctx, context_map)) {
185 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
186 if (ctx > LAST_CONTEXT)
/* Advance the round-robin pointer; mask wraps 256 back to 0. */
189 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
/* Remember the owner so steal_context() can invalidate it later. */
191 context_mm[ctx] = mm;
195 * Set up the context for a new address space.
/* Defer allocation: mark "no context yet"; comma-expression returns 0
 * to signal success to the caller. */
197 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
200 * We're finished using the context for an address space.
202 static inline void destroy_context(struct mm_struct *mm)
204 if (mm->context != NO_CONTEXT) {
/* Release the bitmap slot and return it to the free pool. */
205 clear_bit(mm->context, context_map);
206 mm->context = NO_CONTEXT;
207 atomic_inc(&nr_free_contexts);
/*
 * ColdFire context switch: make sure the incoming mm has an ASID, then
 * program it into the MMU.  NOTE(review): this mixes tsk->mm and
 * next->pgd — presumably the same mm at this point, but the asymmetry
 * is worth confirming.
 */
211 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
212 struct task_struct *tsk)
214 get_mmu_context(tsk->mm);
215 set_context(tsk->mm->context, next->pgd);
219 * After we have set current->mm to a new value, this activates
220 * the context for the new mm so we see the new mappings.
222 static inline void activate_mm(struct mm_struct *active_mm,
223 struct mm_struct *mm)
/* NOTE(review): a hidden line presumably calls get_mmu_context(mm)
 * before this so mm->context is valid — confirm in the full file. */
226 set_context(mm->context, mm->pgd);
/* Nothing to do when an address space is deactivated. */
229 #define deactivate_mm(tsk, mm) do { } while (0)
/* One-time setup of the context allocator, called at boot. */
231 extern void mmu_context_init(void);
232 #ifdef CONFIG_M547X_8X
/* Pre-map the next task's kernel stack before switching to it, so the
 * switch code itself cannot take a TLB miss on the new stack. */
233 #define prepare_arch_switch(next) load_ksp_mmu(next)
235 //FIXME: Don't use TLB here for kernel stacks
/*
 * Install a TLB entry covering @task's kernel stack pointer so the
 * context-switch path cannot fault on it.  Walks the page tables by
 * hand and programs the ColdFire MMU registers directly, with IRQs off
 * for the whole sequence.  Several lines are hidden in this view; the
 * error path falls through to the "ksp load failed" report below.
 */
237 static inline void load_ksp_mmu(struct task_struct *task)
240 struct mm_struct *mm;
/* MMU registers are shared state: no interrupts while we probe/load. */
247 local_irq_save(flags);
248 mmuar = task->thread.ksp;
250 /* Search for a valid TLB entry, if one is found, don't remap */
/* Trigger a TLB search for the address already latched in MMUAR. */
252 *MMUOR = MMUOR_STLB | MMUOR_ADR;
257 if ((*MMUSR) & MMUSR_HIT)
/* Kernel addresses must be translated through init_mm / the kernel pgd;
 * a user mm here would be a bug. */
260 if (mmuar >= PAGE_OFFSET) {
263 printk ("load_ksp_mmu: non-kernel mm found: 0x%08x\n", (unsigned int) task->mm);
/* Manual 3-level walk: pgd -> pmd -> pte (hidden lines presumably
 * bail out to the failure path on pgd/pmd_none — confirm). */
270 pgd = pgd_offset(mm, mmuar);
274 pmd = pmd_offset(pgd, mmuar);
278 pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
279 : pte_offset_map(pmd, mmuar);
280 if (pte_none(*pte) || !pte_present(*pte))
/* Mark referenced; emulates the accessed-bit handling hardware lacks. */
283 set_pte(pte, pte_mkyoung(*pte));
/* ColdFire ASIDs are 8 bits wide. */
284 asid = mm->context & 0xff;
/* Write-protect clean pages so the first write faults and sets dirty.
 * NOTE(review): "<=PAGE_OFFSET" (not "<") — off-by-one suspicion at the
 * boundary address; confirm intent. */
285 if (!pte_dirty(*pte) && mmuar<=PAGE_OFFSET)
286 set_pte(pte, pte_wrprotect(*pte));
/* Tag portion: virtual page | ASID | translated attribute bits. */
288 *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
289 | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK ) >> CF_PAGE_MMUTR_SHIFT)
/* Data portion: physical page | permission bits | 8K size, executable. */
292 *MMUDR = (pte_val(*pte) & PAGE_MASK)
293 | ((pte->pte) & CF_PAGE_MMUDR_MASK)
294 | MMUDR_SZ8K | MMUDR_X;
/* Commit the entry, letting the MMU pick the replacement slot. */
296 *MMUOR = MMUOR_ACC | MMUOR_UAA;
302 printk ("ksp load failed: mm=0x%08x ksp=0x%08x\n", (unsigned int) mm, (unsigned int) mmuar);
305 local_irq_restore(flags);
308 #endif /* CONFIG_M547X_8X */
310 #endif /* CONFIG_COLDFIRE */
312 #else /* !CONFIG_MMU */
/* No-MMU build: every context operation is a stub, since there is no
 * address-space state to create, switch, or destroy. */
314 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
320 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
324 #define destroy_context(mm) do { } while (0)
325 #define deactivate_mm(tsk,mm) do { } while (0)
327 static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
331 #endif /* CONFIG_MMU */
332 #endif /* __M68K_MMU_CONTEXT_H */