/*
 * arch/m68k/include/asm/mcf_cacheflush_m547x_8x.h - ColdFire 547x/548x Cache
 *
 * Based on arch/m68k/include/asm/cacheflush.h
 *
 * ColdFire pieces by:
 *   Kurt Mahan kmahan@freescale.com
 *
 * Copyright Freescale Semiconductor, Inc. 2007, 2008
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#ifndef _M68K_MCF_M547X_8X_CACHEFLUSH_H
#define _M68K_MCF_M547X_8X_CACHEFLUSH_H

/*
 * Cache handling functions
 */

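/*
 * The flush loops below walk the caches set by set and way by way using
 * CPUSHL, which pushes a cache line (and normally invalidates it); the
 * %dc, %ic and %bc specifiers select the data cache, the instruction
 * cache, or both.  The set index and way are encoded in the address
 * register: each "addq #1" steps to the next way, so one pass of the
 * inner asm covers all four ways of a set, and the loop stride of
 * (0x10 - 3) compensates for those three increments so that "set"
 * advances by exactly 0x10, i.e. one set index, per iteration.
 */
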
#define flush_icache()                                          \
({                                                              \
  unsigned long set;                                            \
  unsigned long start_set;                                      \
  unsigned long end_set;                                        \
                                                                \
  start_set = 0;                                                \
  end_set = (unsigned long)LAST_ICACHE_ADDR;                    \
                                                                \
  for (set = start_set; set <= end_set; set += (0x10 - 3)) {    \
    asm volatile("cpushl %%ic,(%0)\n"                           \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%ic,(%0)\n"                         \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%ic,(%0)\n"                         \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%ic,(%0)" : "+a" (set));            \
  }                                                             \
})

#define flush_dcache()                                          \
({                                                              \
  unsigned long set;                                            \
  unsigned long start_set;                                      \
  unsigned long end_set;                                        \
                                                                \
  start_set = 0;                                                \
  end_set = (unsigned long)LAST_DCACHE_ADDR;                    \
                                                                \
  for (set = start_set; set <= end_set; set += (0x10 - 3)) {    \
    asm volatile("cpushl %%dc,(%0)\n"                           \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%dc,(%0)\n"                         \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%dc,(%0)\n"                         \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%dc,(%0)" : "+a" (set));            \
  }                                                             \
})

#define flush_bcache()                                          \
({                                                              \
  unsigned long set;                                            \
  unsigned long start_set;                                      \
  unsigned long end_set;                                        \
                                                                \
  start_set = 0;                                                \
  end_set = (unsigned long)LAST_DCACHE_ADDR;                    \
                                                                \
  for (set = start_set; set <= end_set; set += (0x10 - 3)) {    \
    asm volatile("cpushl %%bc,(%0)\n"                           \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%bc,(%0)\n"                         \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%bc,(%0)\n"                         \
                 "\taddq%.l #1,%0\n"                            \
                 "\tcpushl %%bc,(%0)" : "+a" (set));            \
  }                                                             \
})

/*
 * Invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * Push any dirty cache lines in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);

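/*
 * Illustrative sketch (not part of this header): a driver doing DMA
 * would typically push dirty lines before a device reads a buffer and
 * invalidate stale lines before the CPU reads data a device has written
 * ("buf" and "len" are hypothetical):
 *
 *	cache_push(virt_to_phys(buf), len);	before the device reads buf
 *	... start DMA and wait for completion ...
 *	cache_clear(virt_to_phys(buf), len);	before the CPU reads buf
 */
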
/* This is needed whenever the virtual mapping of the current
   process changes.  */

/**
 * flush_cache_mm - Flush an mm_struct
 * @mm: mm_struct to flush
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
        if (mm == current->mm)
                flush_bcache();
}

#define flush_cache_dup_mm(mm)  flush_cache_mm(mm)

#define flush_cache_all()               flush_bcache()

/**
 * flush_cache_range - Flush a cache range
 * @vma: vma struct
 * @start: Starting address
 * @end: Ending address
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->mm)
                flush_bcache();
//              cf_cache_flush_range(start, end);
}

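/*
 * Note that the start/end arguments above are currently unused: the whole
 * cache is flushed and the finer-grained cf_cache_flush_range() call is
 * left disabled.  The same applies to flush_cache_page() below.
 */
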
/**
 * flush_cache_page - Flush a page of the cache
 * @vma: vma struct
 * @vmaddr: user virtual address of the page
 * @pfn: page frame number
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
        unsigned long vmaddr, unsigned long pfn)
{
        if (vma->vm_mm == current->mm)
                flush_bcache();
//              cf_cache_flush_range(vmaddr, vmaddr + PAGE_SIZE);
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpushl %bc instead of cpushl %dc, cinv %ic */
#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
static inline void __flush_page_to_ram(void *address)
{
  unsigned long set;
  unsigned long start_set;
  unsigned long end_set;
  unsigned long addr = (unsigned long) address;

  addr &= ~(PAGE_SIZE - 1); /* round down to page start address */

  start_set = addr & _ICACHE_SET_MASK;
  end_set = (addr + PAGE_SIZE - 1) & _ICACHE_SET_MASK;

  if (start_set > end_set) {
    /* The set indices wrap around the top of the index space: flush
       from set 0 up to end_set first... */
    for (set = 0; set <= end_set; set += (0x10 - 3)) {
      asm volatile("cpushl %%bc,(%0)\n"
                   "\taddq%.l #1,%0\n"
                   "\tcpushl %%bc,(%0)\n"
                   "\taddq%.l #1,%0\n"
                   "\tcpushl %%bc,(%0)\n"
                   "\taddq%.l #1,%0\n"
                   "\tcpushl %%bc,(%0)" : "+a" (set));
    }
    /* ...then let the loop below finish from start_set to the top */
    end_set = LAST_ICACHE_ADDR;
  }
  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
    asm volatile("cpushl %%bc,(%0)\n"
                 "\taddq%.l #1,%0\n"
                 "\tcpushl %%bc,(%0)\n"
                 "\taddq%.l #1,%0\n"
                 "\tcpushl %%bc,(%0)\n"
                 "\taddq%.l #1,%0\n"
                 "\tcpushl %%bc,(%0)" : "+a" (set));
  }
}

/* Use __flush_page_to_ram() for flush_dcache_page; all values are the same - MW */
#define flush_dcache_page(page)                 \
        __flush_page_to_ram((void *) page_address(page))
#define flush_icache_page(vma, pg)              \
        __flush_page_to_ram((void *) page_address(pg))
#define flush_icache_user_range(adr, len)       do { } while (0)
/* NL */
#define flush_icache_user_page(vma, page, addr, len)    do { } while (0)

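/*
 * Illustrative sketch (not part of this header): flush_dcache_page() is
 * called after the kernel writes into a page-cache page that may also be
 * mapped into user space ("page", "data" and "len" are hypothetical):
 *
 *	memcpy(page_address(page), data, len);
 *	flush_dcache_page(page);
 */
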
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpushl %bc instead of cpushl %dc, cinv %ic */
static inline void flush_icache_range(unsigned long address,
                                      unsigned long endaddr)
{
  unsigned long set;
  unsigned long start_set;
  unsigned long end_set;

  start_set = address & _ICACHE_SET_MASK;
  end_set = endaddr & _ICACHE_SET_MASK;

  if (start_set > end_set) {
    /* The set indices wrap around the top of the index space: flush
       from set 0 up to end_set first... */
    for (set = 0; set <= end_set; set += (0x10 - 3)) {
      asm volatile("cpushl %%ic,(%0)\n"
                   "\taddq%.l #1,%0\n"
                   "\tcpushl %%ic,(%0)\n"
                   "\taddq%.l #1,%0\n"
                   "\tcpushl %%ic,(%0)\n"
                   "\taddq%.l #1,%0\n"
                   "\tcpushl %%ic,(%0)" : "+a" (set));
    }
    /* ...then let the loop below finish from start_set to the top */
    end_set = LAST_ICACHE_ADDR;
  }
  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
    asm volatile("cpushl %%ic,(%0)\n"
                 "\taddq%.l #1,%0\n"
                 "\tcpushl %%ic,(%0)\n"
                 "\taddq%.l #1,%0\n"
                 "\tcpushl %%ic,(%0)\n"
                 "\taddq%.l #1,%0\n"
                 "\tcpushl %%ic,(%0)" : "+a" (set));
  }
}
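
/*
 * Illustrative sketch (not part of this header): flush_icache_range()
 * must be called after the kernel writes instructions into memory,
 * e.g. when loading a module or patching code ("dst", "code" and "size"
 * are hypothetical):
 *
 *	memcpy(dst, code, size);
 *	flush_icache_range((unsigned long) dst, (unsigned long) dst + size);
 */

/*
 * The copy_{to,from}_user_page() helpers below are used on the
 * access_process_vm() path (e.g. by ptrace) when the kernel copies data
 * into or out of another process's page; that is why copy_to_user_page()
 * flushes the icache after the copy.
 */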

static inline void copy_to_user_page(struct vm_area_struct *vma,
                                     struct page *page, unsigned long vaddr,
                                     void *dst, void *src, int len)
{
        memcpy(dst, src, len);
        flush_icache_user_page(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
                                       struct page *page, unsigned long vaddr,
                                       void *dst, void *src, int len)
{
        memcpy(dst, src, len);
}

#define flush_cache_vmap(start, end)            flush_cache_all()
#define flush_cache_vunmap(start, end)          flush_cache_all()
#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)

#endif /* _M68K_MCF_M547X_8X_CACHEFLUSH_H */