/*
 * linux/arch/m68k/mm/cf-mmu.c
 *
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blkdev.h>
#endif
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <asm/coldfire.h>
#include <asm/tlbflush.h>
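/*
 * True when the address lies in the kernel's vmalloc/kmap window.
 * VMALLOC_START and KMAP_END come from the ColdFire memory layout
 * headers; misses in this range are resolved against init_mm rather
 * than the faulting task's mm (see cf_tlb_miss below).
 */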
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];

atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long empty_bad_page_table;
extern unsigned long empty_bad_page;
extern unsigned long num_pages;

extern char __init_begin, __init_end;
void free_initmem(void)
{
	unsigned long addr;
	unsigned long start = (unsigned long)&__init_begin;
	unsigned long end = (unsigned long)&__init_end;

	printk(KERN_INFO "free_initmem: __init_begin = 0x%lx  __init_end = 0x%lx\n",
	       start, end);

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		/* not currently used */
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		init_page_count(virt_to_page(addr));
		/* hand the page back to the allocator and account for it */
		free_page(addr);
		totalram_pages++;
	}
}
/* Coldfire paging_init derived from sun3 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long size;
	enum zone_type zone;

	empty_zero_page = (void *)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	next_pgtable = (unsigned long)alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
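
	/*
	 * Map the kernel linear region (PAGE_OFFSET up to high_memory),
	 * filling one PTE table per PGD slot; PTEs past high_memory are
	 * zeroed below so the tail of the last table stays invalid.
	 */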
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *)next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long)pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);

			if (address >= (unsigned long)high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	/* as in the sun3 version this is based on, drop the boot task's mm */
	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;

	/*
	 * Allocate the bottom 32M (0x40x, 0x41x) to DMA - head.S marks them
	 * NO CACHE.
	 * JKM - this should be changed to allocate from the TOP (0x4f, 0x4e)
	 * but the allocator is being a bit challenging.
	 */
	zones_size[ZONE_DMA] = (32 * 1024 * 1024) >> PAGE_SHIFT;
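	/*
	 * For example, with the 8 KiB pages this port appears to use
	 * (cf. MMUDR_SZ8K in cf_tlb_miss, i.e. PAGE_SHIFT == 13), this
	 * works out to 32 MiB >> 13 = 4096 pages; with 4 KiB pages it
	 * would be 8192.
	 */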

	/* allocate the rest to NORMAL - head.S marks them CACHE */
	zones_size[ZONE_NORMAL] =
		(((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT) -
		zones_size[ZONE_DMA];

	free_area_init(zones_size);
}
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long mmuar;
	unsigned long flags;
	int asid;

	local_save_flags(flags);
	local_irq_disable();

	/* a data-TLB miss latches the faulting address in MMUAR; for an
	 * instruction miss it is rebuilt from the PC and the extension word */
	mmuar = (dtlb) ? regs->mmuar
		       : regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
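	/*
	 * Kernel-mode misses in the kmap window are resolved against
	 * init_mm; everything else walks the faulting task's page tables.
	 * With no mm at all (early boot, kernel threads faulting outside
	 * the kmap area) the miss cannot be handled here.
	 */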
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));
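	/*
	 * Clean user pages are loaded write-protected so that the first
	 * store retraps and the pte is marked dirty above; kernel kmap
	 * pages are exempt. The low 8 bits of mm->context form the ASID
	 * that tags this task's TLB entries.
	 */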
	*MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
	       | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT)
	       | MMUTR_V;

	*MMUDR = (pte_val(*pte) & PAGE_MASK)
	       | ((pte->pte) & CF_PAGE_MMUDR_MASK)
	       | MMUDR_SZ8K | MMUDR_X;

	if (dtlb)
		*MMUOR = MMUOR_ACC | MMUOR_UAA;
	else
		*MMUOR = MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA;

	asm("nop");

#ifdef DEBUG
	printk(KERN_DEBUG "cf_tlb_miss: va=%lx, pa=%lx\n", (mmuar & PAGE_MASK),
	       (pte_val(*pte) & PAGE_MASK));
#endif
	local_irq_restore(flags);
	return 0;
}
/* The following was taken from arch/ppc/mmu_context.c
 *
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
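
/*
 * Worked example: with FIRST_CONTEXT == 1, (1 << 1) - 1 == 0x1, so bit 0
 * of context_map is set and context 0 stays reserved for the kernel;
 * with FIRST_CONTEXT == 0 the map starts empty and context 0 is handed
 * out like any other. (The actual FIRST_CONTEXT/LAST_CONTEXT values
 * live in this port's mmu_context headers.)
 */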
/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP. If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	/* tail reconstructed from the ppc original this file credits:
	 * flush the victim's TLB entries, then release its context */
	flush_tlb_mm(mm);
	destroy_context(mm);
}
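
/*
 * For reference, the typical caller pattern, sketched after the ppc
 * get_mmu_context() this code was taken from (illustrative only; the
 * real implementation lives in this port's mmu_context.h):
 *
 *	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
 *		steal_context();
 *	ctx = next_mmu_context;
 *	while (test_and_set_bit(ctx, context_map)) {
 *		ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
 *		if (ctx > LAST_CONTEXT)
 *			ctx = 0;
 *	}
 *	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
 *	mm->context = ctx;
 *	context_mm[ctx] = mm;
 */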