4 #include <asm/mcfmmu.h>
8 #include <asm/virtconvert.h>
9 #include <linux/linkage.h>
/* Virtual <-> physical address conversion through the kernel linear map. */
#define VTOP(addr) __pa(addr)
#define PTOV(addr) __va(addr)
16 #endif /* !__ASSEMBLY__ */
/* Page protection values within PTE. */

/*
 * MMUDR bits, in proper place.  These sit in the pte exactly where the
 * ColdFire MMU data register wants them, so no shifting is needed.
 */
#define CF_PAGE_LOCKED (0x00000002)
#define CF_PAGE_EXEC (0x00000004)
#define CF_PAGE_WRITABLE (0x00000008)
#define CF_PAGE_READABLE (0x00000010)
#define CF_PAGE_SYSTEM (0x00000020)
#define CF_PAGE_COPYBACK (0x00000040)
#define CF_PAGE_NOCACHE (0x00000080)

/* Everything except the copyback bit, i.e. ~CF_PAGE_COPYBACK. */
#define CF_CACHEMASK (~0x00000040)
/* Union of all MMUDR bits above (bits 1..7). */
#define CF_PAGE_MMUDR_MASK (0x000000fe)

/* NOTE(review): alias presumably used by shared m68k code paths -- verify users. */
#define _PAGE_NOCACHE030 (CF_PAGE_NOCACHE)
/*
 * MMUTR bits; these live higher in the pte and need shifting down by
 * CF_PAGE_MMUTR_SHIFT before being written to the MMU tag register.
 */
#define CF_PAGE_VALID (0x00000400)
#define CF_PAGE_SHARED (0x00000800)

#define CF_PAGE_MMUTR_MASK (0x00000c00)
#define CF_PAGE_MMUTR_SHIFT (10)
/* NOTE(review): presumably the ASID field's shift in the MMU register -- confirm against the MMU fault code. */
#define CF_ASID_MMU_SHIFT (2)
42 /* Fake bits, not implemented in CF, will get masked out before
43 hitting hardware, and might go away altogether once this port is
46 #error COLDFIRE Error: Pages must be at least 8k in size
/* Software-emulated bits (see "fake bits" note above); masked out before reaching hardware. */
#define CF_PAGE_ACCESSED (0x00001000)	/* software "young" bit (pte_young/pte_mkyoung) */
#define CF_PAGE_FILE (0x00000200)	/* marks a nonlinear file pte (pte_file/pgoff_to_pte) */
#define CF_PAGE_DIRTY (0x00000001)	/* software dirty bit (pte_dirty/pte_mkdirty) */
/*
 * Classic 680x0 cache-mode definitions.  NOTE(review): these look like
 * 68040 bits kept so code shared with classic m68k still compiles on
 * ColdFire -- confirm against the users of _PAGE_CACHE040 etc.
 */
#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
#define _DESCTYPE_MASK 0x003
#define _CACHEMASK040 (~0x060)
#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
/*
 * Externally used page protection values: map the generic _PAGE_*
 * names used by common m68k/mm code onto their ColdFire bits.
 */
#define _PAGE_PRESENT (CF_PAGE_VALID)
#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
#define _PAGE_DIRTY (CF_PAGE_DIRTY)
65 #define _PAGE_READWRITE (CF_PAGE_WRITABLE \
70 /* Compound page protection values. */
71 #define PAGE_NONE __pgprot(CF_PAGE_VALID \
74 #define PAGE_SHARED __pgprot(CF_PAGE_VALID \
78 #define PAGE_INIT __pgprot(CF_PAGE_VALID \
85 #define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
93 #define PAGE_COPY __pgprot(CF_PAGE_VALID \
98 * Page protections for initialising protection_map. See mm/mmap.c
99 * for use. In general, the bit positions are xwr, and P-items are
100 * private, the S-items are shared.
103 #define __P000 PAGE_NONE
104 #define __P100 __pgprot(CF_PAGE_VALID \
107 #define __P010 __pgprot(CF_PAGE_VALID \
110 #define __P110 __pgprot(CF_PAGE_VALID \
114 #define __P001 __pgprot(CF_PAGE_VALID \
117 #define __P101 __pgprot(CF_PAGE_VALID \
121 #define __P011 __pgprot(CF_PAGE_VALID \
125 #define __P111 __pgprot(CF_PAGE_VALID \
131 #define __S000 PAGE_NONE
132 #define __S100 __pgprot(CF_PAGE_VALID \
136 #define __S010 PAGE_SHARED
137 #define __S110 __pgprot(CF_PAGE_VALID \
141 #define __S001 __pgprot(CF_PAGE_VALID \
145 #define __S101 __pgprot(CF_PAGE_VALID \
150 #define __S011 __pgprot(CF_PAGE_VALID \
154 #define __S111 __pgprot(CF_PAGE_VALID \
#define PTE_MASK PAGE_MASK
/* Bits preserved by pte_modify(): the page frame plus the soft accessed/dirty bits. */
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
 * pte_modify() - replace the protection bits of @pte with @newprot,
 * preserving the page frame and the soft accessed/dirty bits
 * (everything covered by CF_PAGE_CHG_MASK).
 */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
	pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
/* The pmd level is folded, so pmd_set() is a no-op. */
#define pmd_set(pmdp, ptep) do {} while (0)

/* Point a top-level (pgd) entry at a second-level table (stored as a physical address). */
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
	pgd_val(*pgdp) = virt_to_phys(pmdp);
/* Kernel-virtual address of the page frame a pte maps. */
#define __pte_page(pte) \
	((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET))
/*
 * NOTE(review): pgd_set() stores a physical address, but __pmd_page()
 * uses the raw pmd value as a virtual address -- these only agree if
 * the two are identity-mapped here; verify.
 */
#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
/* True if the pte is entirely empty (no mapping, no swap/file entry). */
extern inline int pte_none(pte_t pte)
	return !pte_val(pte);
/* True if the pte maps a resident page (hardware valid bit set). */
extern inline int pte_present(pte_t pte)
	return pte_val(pte) & CF_PAGE_VALID;
/* Wipe a pte back to the empty state. */
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr,
/* Page-frame number and struct page of the page a pte maps. */
#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pte_page(pte) virt_to_page(__pte_page(pte))
205 extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
206 #define pmd_none(pmd) pmd_none2(&(pmd))
207 extern inline int pmd_bad2(pmd_t *pmd) { return 0; }
208 #define pmd_bad(pmd) pmd_bad2(&(pmd))
209 #define pmd_present(pmd) (!pmd_none2(&(pmd)))
210 extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
212 extern inline int pgd_none(pgd_t pgd) { return 0; }
213 extern inline int pgd_bad(pgd_t pgd) { return 0; }
214 extern inline int pgd_present(pgd_t pgd) { return 1; }
215 extern inline void pgd_clear(pgd_t *pgdp) {}
/* FIXME: maybe support this with a fake bit? */
218 static inline int pte_special(pte_t pte) { return 0; }
219 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Report a corrupt table entry together with the file/line that found it. */
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not...
 * [we have the full set here even if they don't change from m68k]
 */
237 extern inline int pte_read(pte_t pte) \
238 { return pte_val(pte) & CF_PAGE_READABLE; }
239 extern inline int pte_write(pte_t pte) \
240 { return pte_val(pte) & CF_PAGE_WRITABLE; }
241 extern inline int pte_exec(pte_t pte) \
242 { return pte_val(pte) & CF_PAGE_EXEC; }
243 extern inline int pte_dirty(pte_t pte) \
244 { return pte_val(pte) & CF_PAGE_DIRTY; }
245 extern inline int pte_young(pte_t pte) \
246 { return pte_val(pte) & CF_PAGE_ACCESSED; }
247 extern inline int pte_file(pte_t pte) \
248 { return pte_val(pte) & CF_PAGE_FILE; }
250 extern inline pte_t pte_wrprotect(pte_t pte) \
251 { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; }
252 extern inline pte_t pte_rdprotect(pte_t pte) \
253 { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; }
254 extern inline pte_t pte_exprotect(pte_t pte) \
255 { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; }
256 extern inline pte_t pte_mkclean(pte_t pte) \
257 { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; }
258 extern inline pte_t pte_mkold(pte_t pte) \
259 { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; }
260 extern inline pte_t pte_mkwrite(pte_t pte) \
261 { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; }
262 extern inline pte_t pte_mkread(pte_t pte) \
263 { pte_val(pte) |= CF_PAGE_READABLE; return pte; }
264 extern inline pte_t pte_mkexec(pte_t pte) \
265 { pte_val(pte) |= CF_PAGE_EXEC; return pte; }
266 extern inline pte_t pte_mkdirty(pte_t pte) \
267 { pte_val(pte) |= CF_PAGE_DIRTY; return pte; }
268 extern inline pte_t pte_mkyoung(pte_t pte) \
269 { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; }
270 extern inline pte_t pte_mknocache(pte_t pte) \
271 { pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40); return pte; }
272 extern inline pte_t pte_mkcache(pte_t pte) \
273 { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; }
/* The kernel's own page directory doubles as the swapper's. */
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];

/* Find an entry in a pagetable directory. */
#define pgd_index(address) ((address) >> PGDIR_SHIFT)

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* Find an entry in a kernel pagetable directory. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/*
 * Find an entry in the second-level pagetable.  The pmd level is
 * folded here: the pgd entry itself is reused as the pmd.
 */
extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
	return (pmd_t *) pgd;
/* Find an entry in the third-level pagetable. */
/*
 * "address" must be parenthesized: with an expression argument whose
 * top-level operator binds looser than ">>" (e.g. "|", "?:"), the
 * unparenthesized form shifted only part of the expression.
 */
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
	__pte_offset(address))
/*
 * Disable caching for the page at the given kernel virtual address.
 * Walks pgd -> pmd -> pte for @vaddr and rewrites the pte with the
 * no-cache bit set.  NOTE(review): no TLB/cache flush is visible here --
 * presumably the caller handles that; confirm at call sites.
 */
static inline void nocache_page(void *vaddr)
	unsigned long addr = (unsigned long)vaddr;

	dir = pgd_offset_k(addr);
	pmdp = pmd_offset(dir, addr);
	ptep = pte_offset_kernel(pmdp, addr);
	*ptep = pte_mknocache(*ptep);
/*
 * Re-enable caching for the page at the given kernel virtual address.
 * Mirror of nocache_page(): walks to the pte and clears the no-cache
 * bit via pte_mkcache().
 */
static inline void cache_page(void *vaddr)
	unsigned long addr = (unsigned long)vaddr;

	dir = pgd_offset_k(addr);
	pmdp = pmd_offset(dir, addr);
	ptep = pte_offset_kernel(pmdp, addr);
	*ptep = pte_mkcache(*ptep);
/* Nonlinear (file) pte layout: the offset occupies the bits above PTE_FILE_SHIFT. */
#define PTE_FILE_MAX_BITS 21
#define PTE_FILE_SHIFT 11

/* Extract the file offset from a nonlinear file pte. */
static inline unsigned long pte_to_pgoff(pte_t pte)
	return pte_val(pte) >> PTE_FILE_SHIFT;

/* Build a nonlinear file pte (CF_PAGE_FILE marks it) for the given offset. */
static inline pte_t pgoff_to_pte(unsigned pgoff)
	pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
/*
 * "offset" must be parenthesized: with an expression argument whose
 * top-level operator binds looser than "<<", the unparenthesized form
 * shifted only part of the expression.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
					((offset) << PTE_FILE_SHIFT) })
/* Swap type lives in the low byte, offset above PTE_FILE_SHIFT. */
#define __swp_type(x) ((x).val & 0xFF)
#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
/* A swap pte is carried as-is; per the rule above it never has CF_PAGE_VALID set. */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) (__pte((x).val))
/* struct page backing a pmd entry (pmd value converted to a pfn). */
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) + \
					__pte_offset(address))
#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
/*
 * pte_offset_map() does not kmap() anything -- the pte tables are
 * reached through __pmd_page() in the kernel direct mapping -- so
 * unmapping is a no-op.  The old definition passed a pte_t * to
 * kunmap(), which expects a struct page *.
 */
#define pte_unmap(pte) ((void) 0)
#define pte_unmap_nested(pte) ((void) 0)
/* pfn <-> pte conversion; the protection bits occupy the low PAGE_SHIFT bits. */
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
359 #endif /* !__ASSEMBLY__ */
360 #endif /* !_MCF_PGTABLE_H */