/*
 * arch/m68k/include/asm/mcf_m547x_8x_cacheflush.h - Coldfire 547x/548x Cache
 *
 * Based on arch/m68k/include/asm/cacheflush.h
 *
 * Coldfire pieces by:
 *   Kurt Mahan <kmahan@freescale.com>
 *
 * Copyright Freescale Semiconductor, Inc. 2007, 2008
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef _M68K_MCF_M547X_8X_CACHEFLUSH_H
#define _M68K_MCF_M547X_8X_CACHEFLUSH_H
/*
 * Cache handling functions
 */
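/*
 * The three flush macros below walk every line of the caches with the
 * CPUSHL instruction.  The address register passed to "cpushl" encodes the
 * cache set in its upper bits and the way in its two low bits: each asm
 * block pushes all four ways of one set by incrementing the register three
 * times, and the enclosing loop then steps to the next set (stride 0x10,
 * minus the three increments already applied).  "%ic", "%dc" and "%bc"
 * select the instruction cache, the data cache, or both.  LAST_DCACHE_ADDR,
 * LAST_ICACHE_ADDR and _ICACHE_SET_MASK are assumed to be provided by the
 * ColdFire cache definitions included ahead of this header (asm/cfcache.h
 * in the Freescale BSP).
 */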
#define flush_icache()						\
({								\
	unsigned long set;					\
	unsigned long start_set;				\
	unsigned long end_set;					\
	start_set = 0;						\
	end_set = (unsigned long)LAST_DCACHE_ADDR;		\
	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
		asm volatile("cpushl %%ic,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%ic,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%ic,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
	}							\
})
#define flush_dcache()						\
({								\
	unsigned long set;					\
	unsigned long start_set;				\
	unsigned long end_set;					\
	start_set = 0;						\
	end_set = (unsigned long)LAST_DCACHE_ADDR;		\
	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
		asm volatile("cpushl %%dc,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%dc,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%dc,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
	}							\
})
#define flush_bcache()						\
({								\
	unsigned long set;					\
	unsigned long start_set;				\
	unsigned long end_set;					\
	start_set = 0;						\
	end_set = (unsigned long)LAST_DCACHE_ADDR;		\
	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
		asm volatile("cpushl %%bc,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%bc,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%bc,(%0)\n"		\
			     "\taddq%.l #1,%0\n"		\
			     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
	}							\
})
/*
 * Invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * Push any dirty cache lines in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);
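/*
 * Illustrative use only (not an interface defined in this file), assuming
 * the generic virt_to_phys() helper to obtain the physical address:
 *
 *	cache_push(virt_to_phys(buf), len);	before a device reads buf
 *	cache_clear(virt_to_phys(buf), len);	after a device has written buf
 */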
/*
 * Push and invalidate pages in the specified user virtual
 * address range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
/* This is needed whenever the virtual mapping of the current
   process changes. */
/**
 * flush_cache_mm - Flush an mm_struct
 * @mm: mm_struct to flush
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		flush_bcache();
}

#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
#define flush_cache_all()		flush_bcache()
/**
 * flush_cache_range - Flush a cache range
 * @vma: vma struct
 * @start: Starting address
 * @end: Ending address
 *
 * flush_cache_range must not pull in linux/mm.h, which includes this file.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->mm)
		flush_bcache();
	/* cf_cache_flush_range(start, end); -- ranged flush, currently disabled */
}
/**
 * flush_cache_page - Flush a page of the cache
 * @vma: vma struct
 * @vmaddr: user virtual address of the page
 * @pfn: page frame number
 *
 * flush_cache_page must not pull in linux/mm.h, which includes this file.
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		flush_bcache();
	/* cf_cache_flush_range(vmaddr, vmaddr + PAGE_SIZE); -- ranged flush, currently disabled */
}
/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))

extern inline void __flush_page_to_ram(void *address)
{
	unsigned long set;
	unsigned long start_set;
	unsigned long end_set;
	unsigned long addr = (unsigned long) address;

	addr &= ~(PAGE_SIZE - 1); /* round down to page start address */

	start_set = addr & _ICACHE_SET_MASK;
	end_set = (addr + PAGE_SIZE - 1) & _ICACHE_SET_MASK;
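	/*
	 * A page need not start at set 0, so the set range computed above can
	 * wrap: if end_set is numerically below start_set, flush from set 0 up
	 * to end_set here, then let the loop further down continue from
	 * start_set to the last set of the cache.
	 */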
	if (start_set > end_set) {
		/* from the beginning to the lowest address */
		for (set = 0; set <= end_set; set += (0x10 - 3)) {
			asm volatile("cpushl %%bc,(%0)\n"
				     "\taddq%.l #1,%0\n"
				     "\tcpushl %%bc,(%0)\n"
				     "\taddq%.l #1,%0\n"
				     "\tcpushl %%bc,(%0)\n"
				     "\taddq%.l #1,%0\n"
				     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
		}
		/* next loop will finish the cache, i.e. pass the hole */
		end_set = LAST_ICACHE_ADDR;
	}

	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
		asm volatile("cpushl %%bc,(%0)\n"
			     "\taddq%.l #1,%0\n"
			     "\tcpushl %%bc,(%0)\n"
			     "\taddq%.l #1,%0\n"
			     "\tcpushl %%bc,(%0)\n"
			     "\taddq%.l #1,%0\n"
			     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
	}
}
/* Use __flush_page_to_ram() for flush_dcache_page(); all values are the same - MW */
#define flush_dcache_page(page)			\
	__flush_page_to_ram((void *) page_address(page))
#define flush_icache_page(vma, pg)		\
	__flush_page_to_ram((void *) page_address(pg))
#define flush_icache_user_range(adr, len)		do { } while (0)
#define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
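/*
 * flush_icache_range() is called by the generic kernel after instructions
 * have been written to memory (module loading, breakpoint insertion, etc.)
 * so that stale instruction-cache lines are pushed out before the new code
 * is executed.
 */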
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
extern inline void flush_icache_range(unsigned long address,
	unsigned long endaddr)
{
	unsigned long set;
	unsigned long start_set;
	unsigned long end_set;

	start_set = address & _ICACHE_SET_MASK;
	end_set = endaddr & _ICACHE_SET_MASK;

	if (start_set > end_set) {
		/* from the beginning to the lowest address */
		for (set = 0; set <= end_set; set += (0x10 - 3)) {
			asm volatile("cpushl %%ic,(%0)\n"
				     "\taddq%.l #1,%0\n"
				     "\tcpushl %%ic,(%0)\n"
				     "\taddq%.l #1,%0\n"
				     "\tcpushl %%ic,(%0)\n"
				     "\taddq%.l #1,%0\n"
				     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
		}
		/* next loop will finish the cache, i.e. pass the hole */
		end_set = LAST_ICACHE_ADDR;
	}

	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
		asm volatile("cpushl %%ic,(%0)\n"
			     "\taddq%.l #1,%0\n"
			     "\tcpushl %%ic,(%0)\n"
			     "\taddq%.l #1,%0\n"
			     "\tcpushl %%ic,(%0)\n"
			     "\taddq%.l #1,%0\n"
			     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
	}
}
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	memcpy(dst, src, len);
	flush_icache_user_page(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	memcpy(dst, src, len);
}
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#endif /* _M68K_MCF_M547X_8X_CACHEFLUSH_H */