1 /*
2  *  linux/arch/arm/mm/mmu.c
3  *
4  *  Copyright (C) 1995-2005 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/mman.h>
15 #include <linux/nodemask.h>
16 #include <linux/memblock.h>
17 #include <linux/fs.h>
18 #include <linux/vmalloc.h>
19
20 #include <asm/cp15.h>
21 #include <asm/cputype.h>
22 #include <asm/sections.h>
23 #include <asm/cachetype.h>
24 #include <asm/setup.h>
25 #include <asm/sizes.h>
26 #include <asm/smp_plat.h>
27 #include <asm/tlb.h>
28 #include <asm/highmem.h>
29 #include <asm/traps.h>
30
31 #include <asm/mach/arch.h>
32 #include <asm/mach/map.h>
33
34 #include "mm.h"
35
36 /*
37  * empty_zero_page is a special page that is used for
38  * zero-initialized data and COW.
39  */
40 struct page *empty_zero_page;
41 EXPORT_SYMBOL(empty_zero_page);
42
43 /*
44  * The pmd table for the upper-most set of pages.
45  */
46 pmd_t *top_pmd;
47
48 #define CPOLICY_UNCACHED        0
49 #define CPOLICY_BUFFERED        1
50 #define CPOLICY_WRITETHROUGH    2
51 #define CPOLICY_WRITEBACK       3
52 #define CPOLICY_WRITEALLOC      4
53
54 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
55 static unsigned int ecc_mask __initdata = 0;
56 pgprot_t pgprot_user;
57 pgprot_t pgprot_kernel;
58
59 EXPORT_SYMBOL(pgprot_user);
60 EXPORT_SYMBOL(pgprot_kernel);
61
62 struct cachepolicy {
63         const char      policy[16];
64         unsigned int    cr_mask;
65         pmdval_t        pmd;
66         pteval_t        pte;
67 };
68
69 static struct cachepolicy cache_policies[] __initdata = {
70         {
71                 .policy         = "uncached",
72                 .cr_mask        = CR_W|CR_C,
73                 .pmd            = PMD_SECT_UNCACHED,
74                 .pte            = L_PTE_MT_UNCACHED,
75         }, {
76                 .policy         = "buffered",
77                 .cr_mask        = CR_C,
78                 .pmd            = PMD_SECT_BUFFERED,
79                 .pte            = L_PTE_MT_BUFFERABLE,
80         }, {
81                 .policy         = "writethrough",
82                 .cr_mask        = 0,
83                 .pmd            = PMD_SECT_WT,
84                 .pte            = L_PTE_MT_WRITETHROUGH,
85         }, {
86                 .policy         = "writeback",
87                 .cr_mask        = 0,
88                 .pmd            = PMD_SECT_WB,
89                 .pte            = L_PTE_MT_WRITEBACK,
90         }, {
91                 .policy         = "writealloc",
92                 .cr_mask        = 0,
93                 .pmd            = PMD_SECT_WBWA,
94                 .pte            = L_PTE_MT_WRITEALLOC,
95         }
96 };
97
98 /*
99  * These are useful for identifying cache coherency
100  * problems by allowing the cache, or both the cache and the
101  * write buffer, to be turned off.  (Note: the write buffer
102  * should not be enabled while the cache is off.)
103  */
104 static int __init early_cachepolicy(char *p)
105 {
106         int i;
107
108         for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
109                 int len = strlen(cache_policies[i].policy);
110
111                 if (memcmp(p, cache_policies[i].policy, len) == 0) {
112                         cachepolicy = i;
113                         cr_alignment &= ~cache_policies[i].cr_mask;
114                         cr_no_alignment &= ~cache_policies[i].cr_mask;
115                         break;
116                 }
117         }
118         if (i == ARRAY_SIZE(cache_policies))
119                 printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
120         /*
121          * This restriction is partly to do with the way we boot; it is
122          * unpredictable to have memory mapped using two different sets of
123          * memory attributes (shared, type, and cache attribs).  We cannot
124          * change these attributes once the initial assembly has set up the
125          * page tables.
126          */
127         if (cpu_architecture() >= CPU_ARCH_ARMv6) {
128                 printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
129                 cachepolicy = CPOLICY_WRITEBACK;
130         }
131         flush_cache_all();
132         set_cr(cr_alignment);
133         return 0;
134 }
135 early_param("cachepolicy", early_cachepolicy);
136
137 static int __init early_nocache(char *__unused)
138 {
139         char *p = "buffered";
140         printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
141         early_cachepolicy(p);
142         return 0;
143 }
144 early_param("nocache", early_nocache);
145
146 static int __init early_nowrite(char *__unused)
147 {
148         char *p = "uncached";
149         printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
150         early_cachepolicy(p);
151         return 0;
152 }
153 early_param("nowb", early_nowrite);
154
155 #ifndef CONFIG_ARM_LPAE
156 static int __init early_ecc(char *p)
157 {
158         if (memcmp(p, "on", 2) == 0)
159                 ecc_mask = PMD_PROTECTION;
160         else if (memcmp(p, "off", 3) == 0)
161                 ecc_mask = 0;
162         return 0;
163 }
164 early_param("ecc", early_ecc);
165 #endif
166
167 static int __init noalign_setup(char *__unused)
168 {
169         cr_alignment &= ~CR_A;
170         cr_no_alignment &= ~CR_A;
171         set_cr(cr_alignment);
172         return 1;
173 }
174 __setup("noalign", noalign_setup);
175
176 #ifndef CONFIG_SMP
177 void adjust_cr(unsigned long mask, unsigned long set)
178 {
179         unsigned long flags;
180
181         mask &= ~CR_A;
182
183         set &= mask;
184
185         local_irq_save(flags);
186
187         cr_no_alignment = (cr_no_alignment & ~mask) | set;
188         cr_alignment = (cr_alignment & ~mask) | set;
189
190         set_cr((get_cr() & ~mask) | set);
191
192         local_irq_restore(flags);
193 }
194 #endif
195
196 #define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
197 #define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE
198
199 static struct mem_type mem_types[] = {
200         [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
201                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
202                                   L_PTE_SHARED,
203                 .prot_l1        = PMD_TYPE_TABLE,
204                 .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
205                 .domain         = DOMAIN_IO,
206         },
207         [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
208                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
209                 .prot_l1        = PMD_TYPE_TABLE,
210                 .prot_sect      = PROT_SECT_DEVICE,
211                 .domain         = DOMAIN_IO,
212         },
213         [MT_DEVICE_CACHED] = {    /* ioremap_cached */
214                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
215                 .prot_l1        = PMD_TYPE_TABLE,
216                 .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
217                 .domain         = DOMAIN_IO,
218         },      
219         [MT_DEVICE_WC] = {      /* ioremap_wc */
220                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
221                 .prot_l1        = PMD_TYPE_TABLE,
222                 .prot_sect      = PROT_SECT_DEVICE,
223                 .domain         = DOMAIN_IO,
224         },
225         [MT_UNCACHED] = {
226                 .prot_pte       = PROT_PTE_DEVICE,
227                 .prot_l1        = PMD_TYPE_TABLE,
228                 .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
229                 .domain         = DOMAIN_IO,
230         },
231         [MT_CACHECLEAN] = {
232                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
233                 .domain    = DOMAIN_KERNEL,
234         },
235 #ifndef CONFIG_ARM_LPAE
236         [MT_MINICLEAN] = {
237                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
238                 .domain    = DOMAIN_KERNEL,
239         },
240 #endif
241         [MT_LOW_VECTORS] = {
242                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
243                                 L_PTE_RDONLY,
244                 .prot_l1   = PMD_TYPE_TABLE,
245                 .domain    = DOMAIN_USER,
246         },
247         [MT_HIGH_VECTORS] = {
248                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
249                                 L_PTE_USER | L_PTE_RDONLY,
250                 .prot_l1   = PMD_TYPE_TABLE,
251                 .domain    = DOMAIN_USER,
252         },
253         [MT_MEMORY] = {
254                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
255                 .prot_l1   = PMD_TYPE_TABLE,
256                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
257                 .domain    = DOMAIN_KERNEL,
258         },
259         [MT_ROM] = {
260                 .prot_sect = PMD_TYPE_SECT,
261                 .domain    = DOMAIN_KERNEL,
262         },
263         [MT_MEMORY_NONCACHED] = {
264                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
265                                 L_PTE_MT_BUFFERABLE,
266                 .prot_l1   = PMD_TYPE_TABLE,
267                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
268                 .domain    = DOMAIN_KERNEL,
269         },
270         [MT_MEMORY_DTCM] = {
271                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
272                                 L_PTE_XN,
273                 .prot_l1   = PMD_TYPE_TABLE,
274                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
275                 .domain    = DOMAIN_KERNEL,
276         },
277         [MT_MEMORY_ITCM] = {
278                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
279                 .prot_l1   = PMD_TYPE_TABLE,
280                 .domain    = DOMAIN_KERNEL,
281         },
282         [MT_MEMORY_SO] = {
283                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
284                                 L_PTE_MT_UNCACHED,
285                 .prot_l1   = PMD_TYPE_TABLE,
286                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
287                                 PMD_SECT_UNCACHED | PMD_SECT_XN,
288                 .domain    = DOMAIN_KERNEL,
289         },
290 };
291
292 const struct mem_type *get_mem_type(unsigned int type)
293 {
294         return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
295 }
296 EXPORT_SYMBOL(get_mem_type);
297
298 /*
299  * Adjust the PMD section entries according to the CPU in use.
300  */
301 static void __init build_mem_type_table(void)
302 {
303         struct cachepolicy *cp;
304         unsigned int cr = get_cr();
305         pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
306         int cpu_arch = cpu_architecture();
307         int i;
308
309         if (cpu_arch < CPU_ARCH_ARMv6) {
310 #if defined(CONFIG_CPU_DCACHE_DISABLE)
311                 if (cachepolicy > CPOLICY_BUFFERED)
312                         cachepolicy = CPOLICY_BUFFERED;
313 #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
314                 if (cachepolicy > CPOLICY_WRITETHROUGH)
315                         cachepolicy = CPOLICY_WRITETHROUGH;
316 #endif
317         }
318         if (cpu_arch < CPU_ARCH_ARMv5) {
319                 if (cachepolicy >= CPOLICY_WRITEALLOC)
320                         cachepolicy = CPOLICY_WRITEBACK;
321                 ecc_mask = 0;
322         }
323         if (is_smp())
324                 cachepolicy = CPOLICY_WRITEALLOC;
325
326         /*
327          * Strip out features not present on earlier architectures.
328          * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
329          * without extended page tables don't have the 'Shared' bit.
330          */
331         if (cpu_arch < CPU_ARCH_ARMv5)
332                 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
333                         mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
334         if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
335                 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
336                         mem_types[i].prot_sect &= ~PMD_SECT_S;
337
338         /*
339          * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
340          * "update-able on write" bit on ARM610).  However, Xscale and
341          * Xscale3 require this bit to be cleared.
342          */
343         if (cpu_is_xscale() || cpu_is_xsc3()) {
344                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
345                         mem_types[i].prot_sect &= ~PMD_BIT4;
346                         mem_types[i].prot_l1 &= ~PMD_BIT4;
347                 }
348         } else if (cpu_arch < CPU_ARCH_ARMv6) {
349                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
350                         if (mem_types[i].prot_l1)
351                                 mem_types[i].prot_l1 |= PMD_BIT4;
352                         if (mem_types[i].prot_sect)
353                                 mem_types[i].prot_sect |= PMD_BIT4;
354                 }
355         }
356
357         /*
358          * Mark the device areas according to the CPU/architecture.
359          */
360         if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
361                 if (!cpu_is_xsc3()) {
362                         /*
363                          * Mark device regions on ARMv6+ as execute-never
364                          * to prevent speculative instruction fetches.
365                          */
366                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
367                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
368                         mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
369                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
370                 }
371                 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
372                         /*
373                          * For ARMv7 with TEX remapping,
374                          * - shared device is SXCB=1100
375                          * - nonshared device is SXCB=0100
376                          * - write combine device mem is SXCB=0001
377                          * (Uncached Normal memory)
378                          */
379                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
380                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
381                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
382                 } else if (cpu_is_xsc3()) {
383                         /*
384                          * For Xscale3,
385                          * - shared device is TEXCB=00101
386                          * - nonshared device is TEXCB=01000
387                          * - write combine device mem is TEXCB=00100
388                          * (Inner/Outer Uncacheable in xsc3 parlance)
389                          */
390                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
391                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
392                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
393                 } else {
394                         /*
395                          * For ARMv6 and ARMv7 without TEX remapping,
396                          * - shared device is TEXCB=00001
397                          * - nonshared device is TEXCB=01000
398                          * - write combine device mem is TEXCB=00100
399                          * (Uncached Normal in ARMv6 parlance).
400                          */
401                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
402                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
403                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
404                 }
405         } else {
406                 /*
407                  * On others, write combining is "Uncached/Buffered"
408                  */
409                 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
410         }
411
412         /*
413          * Now deal with the memory-type mappings
414          */
415         cp = &cache_policies[cachepolicy];
416         vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
417
418         /*
419          * Only use write-through for non-SMP systems
420          */
421         if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
422                 vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
423
424         /*
425          * Enable CPU-specific coherency if supported.
426          * (Only available on XSC3 at the moment.)
427          */
428         if (arch_is_coherent() && cpu_is_xsc3()) {
429                 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
430                 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
431                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
432                 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
433         }
434         /*
435          * ARMv6 and above have extended page tables.
436          */
437         if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
438 #ifndef CONFIG_ARM_LPAE
439                 /*
440                  * Mark cache clean areas and XIP ROM read only
441                  * from SVC mode and no access from userspace.
442                  */
443                 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
444                 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
445                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
446 #endif
447
448                 if (is_smp()) {
449                         /*
450                          * Mark memory with the "shared" attribute
451                          * for SMP systems
452                          */
453                         user_pgprot |= L_PTE_SHARED;
454                         kern_pgprot |= L_PTE_SHARED;
455                         vecs_pgprot |= L_PTE_SHARED;
456                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
457                         mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
458                         mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
459                         mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
460                         mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
461                         mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
462                         mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
463                         mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
464                 }
465         }
466
467         /*
468          * Non-cacheable Normal - intended for memory areas that must
469          * not cause dirty cache line writebacks when used
470          */
471         if (cpu_arch >= CPU_ARCH_ARMv6) {
472                 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
473                         /* Non-cacheable Normal is XCB = 001 */
474                         mem_types[MT_MEMORY_NONCACHED].prot_sect |=
475                                 PMD_SECT_BUFFERED;
476                 } else {
477                         /* For both ARMv6 and non-TEX-remapping ARMv7 */
478                         mem_types[MT_MEMORY_NONCACHED].prot_sect |=
479                                 PMD_SECT_TEX(1);
480                 }
481         } else {
482                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
483         }
484
485 #ifdef CONFIG_ARM_LPAE
486         /*
487          * Do not generate access flag faults for the kernel mappings.
488          */
489         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
490                 mem_types[i].prot_pte |= PTE_EXT_AF;
491                 mem_types[i].prot_sect |= PMD_SECT_AF;
492         }
493         kern_pgprot |= PTE_EXT_AF;
494         vecs_pgprot |= PTE_EXT_AF;
495 #endif
496
497         for (i = 0; i < 16; i++) {
498                 unsigned long v = pgprot_val(protection_map[i]);
499                 protection_map[i] = __pgprot(v | user_pgprot);
500         }
501
502         mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
503         mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
504
505         pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
506         pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
507                                  L_PTE_DIRTY | kern_pgprot);
508
509         mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
510         mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
511         mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
512         mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
513         mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
514         mem_types[MT_ROM].prot_sect |= cp->pmd;
515
516         switch (cp->pmd) {
517         case PMD_SECT_WT:
518                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
519                 break;
520         case PMD_SECT_WB:
521         case PMD_SECT_WBWA:
522                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
523                 break;
524         }
525         printk("Memory policy: ECC %sabled, Data cache %s\n",
526                 ecc_mask ? "en" : "dis", cp->policy);
527
528         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
529                 struct mem_type *t = &mem_types[i];
530                 if (t->prot_l1)
531                         t->prot_l1 |= PMD_DOMAIN(t->domain);
532                 if (t->prot_sect)
533                         t->prot_sect |= PMD_DOMAIN(t->domain);
534         }
535 }
536
537 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
538 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
539                               unsigned long size, pgprot_t vma_prot)
540 {
541         if (!pfn_valid(pfn))
542                 return pgprot_noncached(vma_prot);
543         else if (file->f_flags & O_SYNC)
544                 return pgprot_writecombine(vma_prot);
545         return vma_prot;
546 }
547 EXPORT_SYMBOL(phys_mem_access_prot);
548 #endif
549
550 #define vectors_base()  (vectors_high() ? 0xffff0000 : 0)
551
552 static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
553 {
554         void *ptr = __va(memblock_alloc(sz, align));
555         memset(ptr, 0, sz);
556         return ptr;
557 }
558
559 static void __init *early_alloc(unsigned long sz)
560 {
561         return early_alloc_aligned(sz, sz);
562 }
563
564 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
565 {
566         if (pmd_none(*pmd)) {
567                 pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
568                 __pmd_populate(pmd, __pa(pte), prot);
569         }
570         BUG_ON(pmd_bad(*pmd));
571         return pte_offset_kernel(pmd, addr);
572 }
573
574 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
575                                   unsigned long end, unsigned long pfn,
576                                   const struct mem_type *type)
577 {
578         pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
579         do {
580                 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
581                 pfn++;
582         } while (pte++, addr += PAGE_SIZE, addr != end);
583 }
584
585 static void __init alloc_init_section(pud_t *pud, unsigned long addr,
586                                       unsigned long end, phys_addr_t phys,
587                                       const struct mem_type *type)
588 {
589         pmd_t *pmd = pmd_offset(pud, addr);
590
591         /*
592          * Try a section mapping - end, addr and phys must all be aligned
593          * to a section boundary.  Note that PMDs refer to the individual
594          * L1 entries, whereas PGDs refer to a group of L1 entries making
595          * up one logical pointer to an L2 table.
596          */
597         if (((addr | end | phys) & ~SECTION_MASK) == 0) {
598                 pmd_t *p = pmd;
599
600 #ifndef CONFIG_ARM_LPAE
601                 if (addr & SECTION_SIZE)
602                         pmd++;
603 #endif
604
605                 do {
606                         *pmd = __pmd(phys | type->prot_sect);
607                         phys += SECTION_SIZE;
608                 } while (pmd++, addr += SECTION_SIZE, addr != end);
609
610                 flush_pmd_entry(p);
611         } else {
612                 /*
613                  * No need to loop; PTEs aren't interested in the
614                  * individual L1 entries.
615                  */
616                 alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
617         }
618 }
619
620 static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
621         unsigned long phys, const struct mem_type *type)
622 {
623         pud_t *pud = pud_offset(pgd, addr);
624         unsigned long next;
625
626         do {
627                 next = pud_addr_end(addr, end);
628                 alloc_init_section(pud, addr, next, phys, type);
629                 phys += next - addr;
630         } while (pud++, addr = next, addr != end);
631 }
632
633 #ifndef CONFIG_ARM_LPAE
634 static void __init create_36bit_mapping(struct map_desc *md,
635                                         const struct mem_type *type)
636 {
637         unsigned long addr, length, end;
638         phys_addr_t phys;
639         pgd_t *pgd;
640
641         addr = md->virtual;
642         phys = __pfn_to_phys(md->pfn);
643         length = PAGE_ALIGN(md->length);
644
645         if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
646                 printk(KERN_ERR "MM: CPU does not support supersection "
647                        "mapping for 0x%08llx at 0x%08lx\n",
648                        (long long)__pfn_to_phys((u64)md->pfn), addr);
649                 return;
650         }
651
652         /* N.B. ARMv6 supersections are only defined to work with domain 0.
653          *      Since domain assignments can in fact be arbitrary, the
654          *      'domain == 0' check below is required to ensure that ARMv6
655          *      supersections are only allocated for domain 0 regardless
656          *      of the actual domain assignments in use.
657          */
658         if (type->domain) {
659                 printk(KERN_ERR "MM: invalid domain in supersection "
660                        "mapping for 0x%08llx at 0x%08lx\n",
661                        (long long)__pfn_to_phys((u64)md->pfn), addr);
662                 return;
663         }
664
665         if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
666                 printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
667                        " at 0x%08lx invalid alignment\n",
668                        (long long)__pfn_to_phys((u64)md->pfn), addr);
669                 return;
670         }
671
672         /*
673          * Shift bits [35:32] of address into bits [23:20] of PMD
674          * (See ARMv6 spec).
675          */
676         phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
677
678         pgd = pgd_offset_k(addr);
679         end = addr + length;
680         do {
681                 pud_t *pud = pud_offset(pgd, addr);
682                 pmd_t *pmd = pmd_offset(pud, addr);
683                 int i;
684
685                 for (i = 0; i < 16; i++)
686                         *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
687
688                 addr += SUPERSECTION_SIZE;
689                 phys += SUPERSECTION_SIZE;
690                 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
691         } while (addr != end);
692 }
693 #endif  /* !CONFIG_ARM_LPAE */
694
695 /*
696  * Create the page directory entries and any necessary
697  * page tables for the mapping specified by `md'.  We
698  * are able to cope here with varying sizes and address
699  * offsets, and we take full advantage of sections and
700  * supersections.
701  */
702 static void __init create_mapping(struct map_desc *md)
703 {
704         unsigned long addr, length, end;
705         phys_addr_t phys;
706         const struct mem_type *type;
707         pgd_t *pgd;
708
709         if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
710                 printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
711                        " at 0x%08lx in user region\n",
712                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
713                 return;
714         }
715
716         if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
717             md->virtual >= PAGE_OFFSET &&
718             (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
719                 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
720                        " at 0x%08lx out of vmalloc space\n",
721                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
722         }
723
724         type = &mem_types[md->type];
725
726 #ifndef CONFIG_ARM_LPAE
727         /*
728          * Catch 36-bit addresses
729          */
730         if (md->pfn >= 0x100000) {
731                 create_36bit_mapping(md, type);
732                 return;
733         }
734 #endif
735
736         addr = md->virtual & PAGE_MASK;
737         phys = __pfn_to_phys(md->pfn);
738         length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
739
740         if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
741                 printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
742                        "be mapped using pages, ignoring.\n",
743                        (long long)__pfn_to_phys(md->pfn), addr);
744                 return;
745         }
746
747         pgd = pgd_offset_k(addr);
748         end = addr + length;
749         do {
750                 unsigned long next = pgd_addr_end(addr, end);
751
752                 alloc_init_pud(pgd, addr, next, phys, type);
753
754                 phys += next - addr;
755                 addr = next;
756         } while (pgd++, addr != end);
757 }
758
759 /*
760  * Create the architecture specific mappings
761  */
762 void __init iotable_init(struct map_desc *io_desc, int nr)
763 {
764         struct map_desc *md;
765         struct vm_struct *vm;
766
767         if (!nr)
768                 return;
769
770         vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
771
772         for (md = io_desc; nr; md++, nr--) {
773                 create_mapping(md);
774                 vm->addr = (void *)(md->virtual & PAGE_MASK);
775                 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
776                 vm->phys_addr = __pfn_to_phys(md->pfn); 
777                 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; 
778                 vm->flags |= VM_ARM_MTYPE(md->type);
779                 vm->caller = iotable_init;
780                 vm_area_add_early(vm++);
781         }
782 }
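
/*
 * A hypothetical usage sketch: a machine's ->map_io() callback would
 * typically describe its static device mappings in a map_desc array and
 * hand it to iotable_init().  The "foo" names and addresses below are
 * made-up placeholders, not taken from any real board file.
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init foo_map_io(void)
 *	{
 *		iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *	}
 */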
783
784 static void * __initdata vmalloc_min =
785         (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
786
787 /*
788  * vmalloc=size forces the vmalloc area to be exactly 'size'
789  * bytes. This can be used to increase (or decrease) the vmalloc
790  * area - the default is 240m.
791  */
792 static int __init early_vmalloc(char *arg)
793 {
794         unsigned long vmalloc_reserve = memparse(arg, NULL);
795
796         if (vmalloc_reserve < SZ_16M) {
797                 vmalloc_reserve = SZ_16M;
798                 printk(KERN_WARNING
799                         "vmalloc area too small, limiting to %luMB\n",
800                         vmalloc_reserve >> 20);
801         }
802
803         if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
804                 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
805                 printk(KERN_WARNING
806                         "vmalloc area is too big, limiting to %luMB\n",
807                         vmalloc_reserve >> 20);
808         }
809
810         vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
811         return 0;
812 }
813 early_param("vmalloc", early_vmalloc);
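
/*
 * Example: booting with "vmalloc=384M" on the kernel command line reserves
 * a 384MB vmalloc area (memparse() understands the usual K/M/G suffixes).
 * Sizes below 16MB, or sizes that would leave less than 32MB of lowmem
 * address space, are clamped by early_vmalloc() above.
 */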
814
815 static phys_addr_t lowmem_limit __initdata = 0;
816
817 void __init sanity_check_meminfo(void)
818 {
819         int i, j, highmem = 0;
820
821         for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
822                 struct membank *bank = &meminfo.bank[j];
823                 *bank = meminfo.bank[i];
824
825                 if (bank->start > ULONG_MAX)
826                         highmem = 1;
827
828 #ifdef CONFIG_HIGHMEM
829                 if (__va(bank->start) >= vmalloc_min ||
830                     __va(bank->start) < (void *)PAGE_OFFSET)
831                         highmem = 1;
832
833                 bank->highmem = highmem;
834
835                 /*
836                  * Split those memory banks which are partially overlapping
837                  * the vmalloc area, greatly simplifying things later.
838                  */
839                 if (!highmem && __va(bank->start) < vmalloc_min &&
840                     bank->size > vmalloc_min - __va(bank->start)) {
841                         if (meminfo.nr_banks >= NR_BANKS) {
842                                 printk(KERN_CRIT "NR_BANKS too low, "
843                                                  "ignoring high memory\n");
844                         } else {
845                                 memmove(bank + 1, bank,
846                                         (meminfo.nr_banks - i) * sizeof(*bank));
847                                 meminfo.nr_banks++;
848                                 i++;
849                                 bank[1].size -= vmalloc_min - __va(bank->start);
850                                 bank[1].start = __pa(vmalloc_min - 1) + 1;
851                                 bank[1].highmem = highmem = 1;
852                                 j++;
853                         }
854                         bank->size = vmalloc_min - __va(bank->start);
855                 }
856 #else
857                 bank->highmem = highmem;
858
859                 /*
860                  * Highmem banks not allowed with !CONFIG_HIGHMEM.
861                  */
862                 if (highmem) {
863                         printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
864                                "(!CONFIG_HIGHMEM).\n",
865                                (unsigned long long)bank->start,
866                                (unsigned long long)bank->start + bank->size - 1);
867                         continue;
868                 }
869
870                 /*
871                  * Check whether this memory bank would entirely overlap
872                  * the vmalloc area.
873                  */
874                 if (__va(bank->start) >= vmalloc_min ||
875                     __va(bank->start) < (void *)PAGE_OFFSET) {
876                         printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
877                                "(vmalloc region overlap).\n",
878                                (unsigned long long)bank->start,
879                                (unsigned long long)bank->start + bank->size - 1);
880                         continue;
881                 }
882
883                 /*
884                  * Check whether this memory bank would partially overlap
885                  * the vmalloc area.
886                  */
887                 if (__va(bank->start + bank->size) > vmalloc_min ||
888                     __va(bank->start + bank->size) < __va(bank->start)) {
889                         unsigned long newsize = vmalloc_min - __va(bank->start);
890                         printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
891                                "to -%.8llx (vmalloc region overlap).\n",
892                                (unsigned long long)bank->start,
893                                (unsigned long long)bank->start + bank->size - 1,
894                                (unsigned long long)bank->start + newsize - 1);
895                         bank->size = newsize;
896                 }
897 #endif
898                 if (!bank->highmem && bank->start + bank->size > lowmem_limit)
899                         lowmem_limit = bank->start + bank->size;
900
901                 j++;
902         }
903 #ifdef CONFIG_HIGHMEM
904         if (highmem) {
905                 const char *reason = NULL;
906
907                 if (cache_is_vipt_aliasing()) {
908                         /*
909                          * Interactions between kmap and other mappings
910                          * make highmem support with aliasing VIPT caches
911                          * rather difficult.
912                          */
913                         reason = "with VIPT aliasing cache";
914                 }
915                 if (reason) {
916                         printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
917                                 reason);
918                         while (j > 0 && meminfo.bank[j - 1].highmem)
919                                 j--;
920                 }
921         }
922 #endif
923         meminfo.nr_banks = j;
924         high_memory = __va(lowmem_limit - 1) + 1;
925         memblock_set_current_limit(lowmem_limit);
926 }
927
928 static inline void prepare_page_table(void)
929 {
930         unsigned long addr;
931         phys_addr_t end;
932
933         /*
934          * Clear out all the mappings below the kernel image.
935          */
936         for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
937                 pmd_clear(pmd_off_k(addr));
938
939 #ifdef CONFIG_XIP_KERNEL
940         /* The XIP kernel is mapped in the module area -- skip over it */
941         addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
942 #endif
943         for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
944                 pmd_clear(pmd_off_k(addr));
945
946         /*
947          * Find the end of the first block of lowmem.
948          */
949         end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
950         if (end >= lowmem_limit)
951                 end = lowmem_limit;
952
953         /*
954          * Clear out all the kernel space mappings, except for the first
955          * memory bank, up to the vmalloc region.
956          */
957         for (addr = __phys_to_virt(end);
958              addr < VMALLOC_START; addr += PMD_SIZE)
959                 pmd_clear(pmd_off_k(addr));
960 }
961
962 #ifdef CONFIG_ARM_LPAE
963 /* the first page is reserved for pgd */
964 #define SWAPPER_PG_DIR_SIZE     (PAGE_SIZE + \
965                                  PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
966 #else
967 #define SWAPPER_PG_DIR_SIZE     (PTRS_PER_PGD * sizeof(pgd_t))
968 #endif
969
970 /*
971  * Reserve the special regions of memory
972  */
973 void __init arm_mm_memblock_reserve(void)
974 {
975         /*
976          * Reserve the page tables.  These are already in use,
977          * and can only be in node 0.
978          */
979         memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
980
981 #ifdef CONFIG_SA1111
982         /*
983          * Because of the SA1111 DMA bug, we want to preserve our
984          * precious DMA-able memory...
985          */
986         memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
987 #endif
988 }
989
990 /*
991  * Set up the device mappings.  Since we clear out the page tables for all
992  * mappings above VMALLOC_START, we will remove any debug device mappings.
993  * This means you have to be careful how you debug this function, or any
994  * called function: you can't use any function or debugging
995  * method which may touch any device, otherwise the kernel _will_ crash.
996  */
997 static void __init devicemaps_init(struct machine_desc *mdesc)
998 {
999         struct map_desc map;
1000         unsigned long addr;
1001         void *vectors;
1002
1003         /*
1004          * Allocate the vector page early.
1005          */
1006         vectors = early_alloc(PAGE_SIZE);
1007
1008         early_trap_init(vectors);
1009
1010         for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
1011                 pmd_clear(pmd_off_k(addr));
1012
1013         /*
1014          * Map the kernel if it is XIP.
1015          * It is always first in the module area.
1016          */
1017 #ifdef CONFIG_XIP_KERNEL
1018         map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1019         map.virtual = MODULES_VADDR;
1020         map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1021         map.type = MT_ROM;
1022         create_mapping(&map);
1023 #endif
1024
1025         /*
1026          * Map the cache flushing regions.
1027          */
1028 #ifdef FLUSH_BASE
1029         map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1030         map.virtual = FLUSH_BASE;
1031         map.length = SZ_1M;
1032         map.type = MT_CACHECLEAN;
1033         create_mapping(&map);
1034 #endif
1035 #ifdef FLUSH_BASE_MINICACHE
1036         map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1037         map.virtual = FLUSH_BASE_MINICACHE;
1038         map.length = SZ_1M;
1039         map.type = MT_MINICLEAN;
1040         create_mapping(&map);
1041 #endif
1042
1043         /*
1044          * Create a mapping for the machine vectors at the high-vectors
1045          * location (0xffff0000).  If we aren't using high-vectors, also
1046          * create a mapping at the low-vectors virtual address.
1047          */
1048         map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1049         map.virtual = 0xffff0000;
1050         map.length = PAGE_SIZE;
1051         map.type = MT_HIGH_VECTORS;
1052         create_mapping(&map);
1053
1054         if (!vectors_high()) {
1055                 map.virtual = 0;
1056                 map.type = MT_LOW_VECTORS;
1057                 create_mapping(&map);
1058         }
1059
1060         /*
1061          * Ask the machine support to map in the statically mapped devices.
1062          */
1063         if (mdesc->map_io)
1064                 mdesc->map_io();
1065
1066         /*
1067          * Finally flush the caches and tlb to ensure that we're in a
1068          * consistent state wrt the writebuffer.  This also ensures that
1069          * any write-allocated cache lines in the vector page are written
1070          * back.  After this point, we can start to touch devices again.
1071          */
1072         local_flush_tlb_all();
1073         flush_cache_all();
1074 }
1075
1076 static void __init kmap_init(void)
1077 {
1078 #ifdef CONFIG_HIGHMEM
1079         pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1080                 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1081 #endif
1082 }
1083
1084 static void __init map_lowmem(void)
1085 {
1086         struct memblock_region *reg;
1087
1088         /* Map all the lowmem memory banks. */
1089         for_each_memblock(memory, reg) {
1090                 phys_addr_t start = reg->base;
1091                 phys_addr_t end = start + reg->size;
1092                 struct map_desc map;
1093
1094                 if (end > lowmem_limit)
1095                         end = lowmem_limit;
1096                 if (start >= end)
1097                         break;
1098
1099                 map.pfn = __phys_to_pfn(start);
1100                 map.virtual = __phys_to_virt(start);
1101                 map.length = end - start;
1102                 map.type = MT_MEMORY;
1103
1104                 create_mapping(&map);
1105         }
1106 }
1107
1108 /*
1109  * paging_init() sets up the page tables, initialises the zone memory
1110  * maps, and sets up the zero page, bad page and bad page tables.
1111  */
1112 void __init paging_init(struct machine_desc *mdesc)
1113 {
1114         void *zero_page;
1115
1116         memblock_set_current_limit(lowmem_limit);
1117
1118         build_mem_type_table();
1119         prepare_page_table();
1120         map_lowmem();
1121         devicemaps_init(mdesc);
1122         kmap_init();
1123
1124         top_pmd = pmd_off_k(0xffff0000);
1125
1126         /* allocate the zero page. */
1127         zero_page = early_alloc(PAGE_SIZE);
1128
1129         bootmem_init();
1130
1131         empty_zero_page = virt_to_page(zero_page);
1132         __flush_dcache_page(NULL, empty_zero_page);
1133 }