2 * arch/m68k/include/asm/mcf_5445x_cacheflush.h - Coldfire 5445x Cache
4 * Based on arch/m68k/include/asm/cacheflush.h
7 * Kurt Mahan kmahan@freescale.com
9 * Copyright Freescale Semiconductor, Inc. 2007, 2008
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 #ifndef M68K_CF_5445x_CACHEFLUSH_H
17 #define M68K_CF_5445x_CACHEFLUSH_H
19 #include <asm/mcfcache.h>
22 * Coldfire Cache Model
24 * The Coldfire processors use a Harvard architecture cache configured
25 * as four-way set associative. The cache does not implement bus snooping
26 * so cache coherency with other masters must be maintained in software.
28 * The cache is managed via the CPUSHL instruction in conjunction with
29 * bits set in the CACR (cache control register). Currently the code
30 * uses the CPUSHL enhancement which adds the ability to
31 * invalidate/clear/push a cacheline by physical address. This feature
32 * is designated in the Hardware Configuration Register [D1-CPES].
35 * DPI[28] cpushl invalidate disable for d-cache
36 * IDPI[12] cpushl invalidate disable for i-cache
37 * SPA[14] cpushl search by physical address
38 * IVO[20] cpushl invalidate only
41 * * invalidate = reset the cache line's valid bit
42 * * push = generate a line-sized store of the data if its contents are marked
43 * as modified (the modified flag is cleared after the store)
44 * * clear = push + invalidate
48 * flush_icache - Flush all of the instruction cache
/*
 * NOTE(review): this extract is missing source lines (the file's own line
 * numbers are fused into the text and skip around); the asm set-up and
 * loop-control instructions of this function are not visible here.
 */
50 static inline void flush_icache(void)
/* CPUSHL %ic operates on one instruction-cache line addressed by (a0). */
57 "cpushl %%ic,(%%a0)\n"
/* Step a0 to the next line; 0x10 presumably equals CACHE_LINE_SIZE — confirm. */
58 "add%.l #0x0010,%%a0\n"
72 * flush_dcache - Flush all of the data cache
/* NOTE(review): asm set-up and loop-control lines are elided in this extract. */
74 static inline void flush_dcache(void)
/* CPUSHL %dc operates on one data-cache line addressed by (a0). */
81 "cpushl %%dc,(%%a0)\n"
/* Step a0 to the next line; 0x10 presumably equals CACHE_LINE_SIZE — confirm. */
82 "add%.l #0x0010,%%a0\n"
96 * flush_bcache - Flush all of both caches
/* NOTE(review): asm set-up and loop-control lines are elided in this extract. */
98 static inline void flush_bcache(void)
/* Load the (elided) set/way counter into a0 for the CPUSHL walk. */
103 "move%.l %%d0,%%a0\n"
/* CPUSHL %bc operates on both caches for the line addressed by (a0). */
105 "cpushl %%bc,(%%a0)\n"
106 "add%.l #0x0010,%%a0\n"
/* Reload a0 for the next pass of the (elided) outer loop. */
112 "move%.l %%d0,%%a0\n"
120 * cf_cache_clear - invalidate cache
121 * @paddr: starting physical address
122 * @len: number of bytes
124 * Invalidate cache lines starting at paddr for len bytes.
125 * Those lines are not pushed.
/* NOTE(review): several asm/brace lines are elided in this extract. */
127 static inline void cf_cache_clear(unsigned long paddr, int len)
129 /* number of lines */
/* Round len up to a whole number of cache lines. */
130 len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
134 /* align on set boundary */
/*
 * Write CACR with SPA (search by physical address) + IVO (invalidate
 * only) so the CPUSHL below invalidates by paddr without pushing,
 * per the bit legend in the file header.
 */
140 "movec %%d0,%%cacr\n"
144 "cpushl %%bc,(%%a0)\n"
/* Advance one cache line (0x10 bytes). */
145 "lea 0x10(%%a0),%%a0\n"
149 : : "a" (paddr), "r" (len),
151 "i" (CF_CACR_SPA+CF_CACR_IVO)
156 * cf_cache_push - Push dirty cache out with no invalidate
157 * @paddr: starting physical address
158 * @len: number of bytes
160 * Push any dirty lines starting at paddr for len bytes.
161 * Those lines are not invalidated.
/* NOTE(review): several asm/brace lines are elided in this extract. */
163 static inline void cf_cache_push(unsigned long paddr, int len)
165 /* number of lines */
/* Round len up to a whole number of cache lines. */
166 len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
170 /* align on set boundary */
/*
 * Write CACR with SPA (search by physical address) + DPI/IDPI
 * (invalidate disabled for d-/i-cache) so CPUSHL pushes without
 * invalidating, per the bit legend in the file header.
 */
176 "movec %%d0,%%cacr\n"
180 "cpushl %%bc,(%%a0)\n"
/* Advance one cache line (0x10 bytes). */
181 "lea 0x10(%%a0),%%a0\n"
185 : : "a" (paddr), "r" (len),
187 "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
192 * cf_cache_flush - Push dirty cache out and invalidate
193 * @paddr: starting physical address
194 * @len: number of bytes
196 * Push any dirty lines starting at paddr for len bytes and
197 * invalidate those lines.
/* NOTE(review): several asm/brace lines are elided in this extract. */
199 static inline void cf_cache_flush(unsigned long paddr, int len)
201 /* number of lines */
/* Round len up to a whole number of cache lines. */
202 len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
206 /* align on set boundary */
/*
 * CACR write; the "i" constraint carrying the CACR bits is on an
 * elided line — presumably CF_CACR_SPA alone (push + invalidate),
 * by analogy with the sibling functions.  TODO confirm.
 */
212 "movec %%d0,%%cacr\n"
216 "cpushl %%bc,(%%a0)\n"
/* Advance one cache line (0x10 bytes). */
217 "lea 0x10(%%a0),%%a0\n"
221 : : "a" (paddr), "r" (len),
228 * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
229 * @vstart: starting virtual address
230 * @vend: ending virtual address
232 * Push any dirty data/instr lines in [vstart, vend) and
233 * invalidate those lines.
/* NOTE(review): several asm/brace lines are elided in this extract. */
235 static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
239 /* align on set boundary */
/* Align start down to a 16-byte cache line. */
240 vstart &= 0xfffffff0;
/*
 * NOTE(review): unlike cf_dcache_flush_range/cf_icache_flush_range,
 * the end address is additionally PAGE_ALIGN()ed here, which rounds
 * up to a page (the trailing & 0xfffffff0 is then a no-op) and so
 * over-flushes up to a page — looks inconsistent; confirm intent.
 */
241 vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
/*
 * Convert to a physical address for CPUSHL-by-physical-address.
 * Only this vstart conversion is visible in the extract; presumably
 * vend is converted on an elided line — confirm against the original.
 */
245 vstart = __pa(vstart);
251 "movec %%d0,%%cacr\n"
255 "cpushl %%bc,(%%a0)\n"
/* Advance one cache line, then compare against the end address. */
256 "lea 0x10(%%a0),%%a0\n"
257 "cmpa%.l %%a0,%%a1\n"
261 : "a" (vstart), "a" (vend),
268 * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
269 * @vstart: starting virtual address
270 * @vend: ending virtual address
272 * Push any dirty data lines in [vstart, vend) and
273 * invalidate those lines.
/* NOTE(review): several asm/brace lines are elided in this extract. */
275 static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
277 /* align on set boundary */
/* Align start down, round end up, both to 16-byte cache lines. */
278 vstart &= 0xfffffff0;
279 vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
/* CACR write; bits are on an elided constraint line — presumably SPA. */
284 "movec %%d0,%%cacr\n"
/* CPUSHL %dc: data cache only, line addressed (physically) by (a0). */
288 "cpushl %%dc,(%%a0)\n"
/* Advance one cache line, then compare against the end address. */
289 "lea 0x10(%%a0),%%a0\n"
290 "cmpa%.l %%a0,%%a1\n"
/* Both bounds converted to physical addresses for the search. */
294 : "a" (__pa(vstart)), "a" (__pa(vend)),
301 * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
302 * @vstart: starting virtual address
303 * @vend: ending virtual address
305 * Push any dirty instr lines in [vstart, vend) and
306 * invalidate those lines. This should just be an invalidate since you
307 * shouldn't be able to have dirty instruction cache.
/* NOTE(review): several asm/brace lines are elided in this extract. */
309 static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
311 /* align on set boundary */
/* Align start down, round end up, both to 16-byte cache lines. */
312 vstart &= 0xfffffff0;
313 vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
/* CACR write; bits are on an elided constraint line — presumably SPA. */
318 "movec %%d0,%%cacr\n"
/* CPUSHL %ic: instruction cache only, line addressed (physically) by (a0). */
322 "cpushl %%ic,(%%a0)\n"
/* Advance one cache line, then compare against the end address. */
323 "lea 0x10(%%a0),%%a0\n"
324 "cmpa%.l %%a0,%%a1\n"
/* Both bounds converted to physical addresses for the search. */
328 : "a" (__pa(vstart)), "a" (__pa(vend)),
335 * flush_cache_mm - Flush an mm_struct
336 * @mm: mm_struct to flush
/*
 * Only flushes when @mm is the current address space (otherwise its
 * lines cannot be live in this CPU's non-snooping cache).
 */
338 static inline void flush_cache_mm(struct mm_struct *mm)
/*
 * NOTE(review): the statement executed when the test passes is on an
 * elided line — presumably flush_bcache() (cf. flush_cache_all below);
 * confirm against the original file.
 */
340 if (mm == current->mm)
/* Duplicating an mm (fork) is flushed identically to a plain mm flush. */
344 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
347 * flush_cache_range - Flush a cache range
349 * @start: Starting address
350 * @end: Ending address
352 * NOTE(review): the original comment claimed this must be a macro to
353 * avoid a linux/mm.h dependency, but it is a static inline — stale text.
/* @vma: VMA owning the range; only flushed when it belongs to current->mm. */
355 static inline void flush_cache_range(struct vm_area_struct *vma,
356 unsigned long start, unsigned long end)
358 if (vma->vm_mm == current->mm)
359 cf_cache_flush_range(start, end);
363 * flush_cache_page - Flush a page of the cache
368 * NOTE(review): the original comment claimed this must be a macro to
369 * avoid a linux/mm.h dependency, but it is a static inline — stale text.
/* Flushes one page starting at @vmaddr when @vma belongs to current->mm. */
371 static inline void flush_cache_page(struct vm_area_struct *vma,
372 unsigned long vmaddr, unsigned long pfn)
374 if (vma->vm_mm == current->mm)
375 cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
379 * __flush_page_to_ram - Push a page out of the cache
380 * @vaddr: Virtual address at start of page
382 * Push the page at kernel virtual address *vaddr* and clear
/* NOTE(review): several asm/brace lines are elided in this extract. */
385 static inline void __flush_page_to_ram(void *vaddr)
/* Write CACR with shadow_cacr | SPA to enable CPUSHL by physical address. */
390 "movec %%d0,%%cacr\n"
/* Align the physical page address down to a 16-byte cache line. */
392 "and%.l #0xfffffff0,%%d0\n"
393 "move%.l %%d0,%%a0\n"
/* One CPUSHL %bc per cache line; iteration count is PAGE_SIZE/CACHE_LINE_SIZE. */
396 "cpushl %%bc,(%%a0)\n"
397 "lea 0x10(%%a0),%%a0\n"
401 : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
402 "r" (shadow_cacr), "i" (CF_CACR_SPA)
407 * Various defines for the kernel.
/* Out-of-line helpers provided elsewhere in the m68k arch code. */
410 extern void cache_clear(unsigned long paddr, int len);
411 extern void cache_push(unsigned long paddr, int len);
412 extern void flush_icache_range(unsigned long address, unsigned long endaddr);
/* vmap/vunmap flush everything — conservative, no range tracking. */
414 #define flush_cache_all() flush_bcache()
415 #define flush_cache_vmap(start, end) flush_bcache()
416 #define flush_cache_vunmap(start, end) flush_bcache()
418 #define flush_dcache_range(vstart, vend) cf_dcache_flush_range(vstart, vend)
419 #define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
/* No dcache-alias bookkeeping is done in this header, so these are no-ops. */
420 #define flush_dcache_mmap_lock(mapping) do { } while (0)
421 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
423 #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
426 * copy_to_user_page - Copy memory to user page
/*
 * Copy @len bytes into the user page, then push+invalidate the whole
 * page so the new data is visible to other (non-snooping) masters.
 * NOTE(review): flushes PAGE_SIZE rather than just @len — conservative
 * over-flush, apparently intentional.
 */
428 static inline void copy_to_user_page(struct vm_area_struct *vma,
429 struct page *page, unsigned long vaddr,
430 void *dst, void *src, int len)
432 memcpy(dst, src, len);
433 cf_cache_flush(page_to_phys(page), PAGE_SIZE);
437 * copy_from_user_page - Copy memory from user page
/*
 * Push+invalidate the whole page first so the copy reads data coherent
 * with any dirty cache lines, then copy @len bytes out of it.
 */
439 static inline void copy_from_user_page(struct vm_area_struct *vma,
440 struct page *page, unsigned long vaddr,
441 void *dst, void *src, int len)
443 cf_cache_flush(page_to_phys(page), PAGE_SIZE);
444 memcpy(dst, src, len);
447 #endif /* M68K_CF_5445x_CACHEFLUSH_H */