]> rtime.felk.cvut.cz Git - mcf548x/linux.git/blob - arch/m68k/include/asm/mcf_5445x_cacheflush.h
Headers cleanup
[mcf548x/linux.git] / arch / m68k / include / asm / mcf_5445x_cacheflush.h
1 /*
2  * arch/m68k/include/asm/mcf_5445x_cacheflush.h - Coldfire 5445x Cache
3  *
4  * Based on arch/m68k/include/asm/cacheflush.h
5  *
6  * Coldfire pieces by:
7  *   Kurt Mahan kmahan@freescale.com
8  *
9  * Copyright Freescale Semiconductor, Inc. 2007, 2008
10  *
11  * This program is free software; you can redistribute  it and/or modify it
12  * under  the terms of  the GNU General  Public License as published by the
13  * Free Software Foundation;  either version 2 of the  License, or (at your
14  * option) any later version.
15  */
16 #ifndef M68K_CF_5445x_CACHEFLUSH_H
17 #define M68K_CF_5445x_CACHEFLUSH_H
18
19 #include <asm/mcfcache.h>
20
21 /*
22  * Coldfire Cache Model
23  *
24  * The Coldfire processors use a Harvard architecture cache configured
25  * as four-way set associative.  The cache does not implement bus snooping
26  * so cache coherency with other masters must be maintained in software.
27  *
28  * The cache is managed via the CPUSHL instruction in conjunction with
29  * bits set in the CACR (cache control register).  Currently the code
30  * uses the CPUSHL enhancement which adds the ability to
31  * invalidate/clear/push a cacheline by physical address.  This feature
32  * is designated in the Hardware Configuration Register [D1-CPES].
33  *
34  * CACR Bits:
35  *      DPI[28]         cpushl invalidate disable for d-cache
36  *      IDPI[12]        cpushl invalidate disable for i-cache
37  *      SPA[14]         cpushl search by physical address
38  *      IVO[20]         cpushl invalidate only
39  *
40  * Random Terminology:
41  *  * invalidate = reset the cache line's valid bit
42  *  * push = generate a line-sized store of the data if its contents are marked
 *           as modified (the modified flag is cleared after the store)
44  *  * clear = push + invalidate
45  */
46
/**
 * flush_icache - Flush all of the instruction cache
 *
 * Walks every line of the 4-way set-associative i-cache and pushes
 * it with CPUSHL %ic (set/way addressing: the way number sits in the
 * low bits of a0, the set index advances in 0x10-byte steps).
 *
 * Register usage:
 *   d0 - current way (0..3), also the base value loaded into a0
 *   d1 - set counter (0..CACHE_SETS-1)
 *   a0 - set/way address operand handed to CPUSHL
 */
static inline void flush_icache(void)
{
        asm volatile("nop\n"
                     "moveq%.l  #0,%%d0\n"      /* d0 = way 0 */
                     "moveq%.l  #0,%%d1\n"      /* d1 = set 0 */
                     "move%.l   %%d0,%%a0\n"    /* a0 = way base address */
                     "1:\n"
                     "cpushl    %%ic,(%%a0)\n"  /* push one i-cache line */
                     "add%.l    #0x0010,%%a0\n" /* advance to next set */
                     "addq%.l   #1,%%d1\n"
                     "cmpi%.l   %0,%%d1\n"      /* all sets of this way done? */
                     "bne       1b\n"
                     "moveq%.l  #0,%%d1\n"      /* restart set counter */
                     "addq%.l   #1,%%d0\n"      /* next way */
                     "move%.l   %%d0,%%a0\n"
                     "cmpi%.l   #4,%%d0\n"      /* all 4 ways done? */
                     "bne       1b\n"
                     : : "i" (CACHE_SETS)
                     : "a0", "d0", "d1");
}
70
/**
 * flush_dcache - Flush all of the data cache
 *
 * Same set/way walk as flush_icache(), but issuing CPUSHL %dc so
 * only the data cache is pushed.
 *
 * Register usage:
 *   d0 - current way (0..3), also the base value loaded into a0
 *   d1 - set counter (0..CACHE_SETS-1)
 *   a0 - set/way address operand handed to CPUSHL
 */
static inline void flush_dcache(void)
{
        asm volatile("nop\n"
                     "moveq%.l  #0,%%d0\n"      /* d0 = way 0 */
                     "moveq%.l  #0,%%d1\n"      /* d1 = set 0 */
                     "move%.l   %%d0,%%a0\n"    /* a0 = way base address */
                     "1:\n"
                     "cpushl    %%dc,(%%a0)\n"  /* push one d-cache line */
                     "add%.l    #0x0010,%%a0\n" /* advance to next set */
                     "addq%.l   #1,%%d1\n"
                     "cmpi%.l   %0,%%d1\n"      /* all sets of this way done? */
                     "bne       1b\n"
                     "moveq%.l  #0,%%d1\n"      /* restart set counter */
                     "addq%.l   #1,%%d0\n"      /* next way */
                     "move%.l   %%d0,%%a0\n"
                     "cmpi%.l   #4,%%d0\n"      /* all 4 ways done? */
                     "bne       1b\n"
                     : : "i" (CACHE_SETS)
                     : "a0", "d0", "d1");
}
94
/**
 * flush_bcache - Flush all of both caches
 *
 * Same set/way walk as flush_icache()/flush_dcache(), but CPUSHL %bc
 * operates on both the instruction and the data cache in one pass.
 *
 * Register usage:
 *   d0 - current way (0..3), also the base value loaded into a0
 *   d1 - set counter (0..CACHE_SETS-1)
 *   a0 - set/way address operand handed to CPUSHL
 */
static inline void flush_bcache(void)
{
        asm volatile("nop\n"
                     "moveq%.l  #0,%%d0\n"      /* d0 = way 0 */
                     "moveq%.l  #0,%%d1\n"      /* d1 = set 0 */
                     "move%.l   %%d0,%%a0\n"    /* a0 = way base address */
                     "1:\n"
                     "cpushl    %%bc,(%%a0)\n"  /* push line in both caches */
                     "add%.l    #0x0010,%%a0\n" /* advance to next set */
                     "addq%.l   #1,%%d1\n"
                     "cmpi%.l   %0,%%d1\n"      /* all sets of this way done? */
                     "bne       1b\n"
                     "moveq%.l  #0,%%d1\n"      /* restart set counter */
                     "addq%.l   #1,%%d0\n"      /* next way */
                     "move%.l   %%d0,%%a0\n"
                     "cmpi%.l   #4,%%d0\n"      /* all 4 ways done? */
                     "bne       1b\n"
                     : : "i" (CACHE_SETS)
                     : "a0", "d0", "d1");
}
118
/**
 * cf_cache_clear - invalidate cache
 * @paddr: starting physical address
 * @len: number of bytes
 *
 * Invalidate cache lines starting at paddr for len bytes.
 * Those lines are not pushed: the CACR is temporarily switched to
 * SPA (search by physical address) + IVO (invalidate only) mode,
 * then restored from shadow_cacr when done.
 */
static inline void cf_cache_clear(unsigned long paddr, int len)
{
        /* number of lines */
        len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
        if (len == 0)
                return;

        /* align on set boundary */
        paddr &= 0xfffffff0;

        asm volatile("nop\n"
                     "move%.l   %2,%%d0\n"      /* d0 = shadow_cacr */
                     "or%.l     %3,%%d0\n"      /* enable SPA + IVO modes */
                     "movec     %%d0,%%cacr\n"
                     "move%.l   %0,%%a0\n"      /* a0 = physical address */
                     "move%.l   %1,%%d0\n"      /* d0 = line count */
                     "1:\n"
                     "cpushl    %%bc,(%%a0)\n"  /* invalidate-only (IVO set) */
                     "lea       0x10(%%a0),%%a0\n"
                     "subq%.l   #1,%%d0\n"
                     "bne%.b    1b\n"
                     "movec     %2,%%cacr\n"    /* restore normal CACR */
                     : : "a" (paddr), "r" (len),
                         "r" (shadow_cacr),
                         "i" (CF_CACR_SPA+CF_CACR_IVO)
                     : "a0", "d0");
}
154
/**
 * cf_cache_push - Push dirty cache out with no invalidate
 * @paddr: starting physical address
 * @len: number of bytes
 *
 * Push any dirty lines starting at paddr for len bytes.
 * Those lines are not invalidated: DPI + IDPI disable the CPUSHL
 * invalidate for the d-cache and i-cache respectively, so lines are
 * written back but stay valid.  CACR is restored from shadow_cacr.
 */
static inline void cf_cache_push(unsigned long paddr, int len)
{
        /* number of lines */
        len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
        if (len == 0)
                return;

        /* align on set boundary */
        paddr &= 0xfffffff0;

        asm volatile("nop\n"
                     "move%.l   %2,%%d0\n"      /* d0 = shadow_cacr */
                     "or%.l     %3,%%d0\n"      /* enable SPA + DPI + IDPI */
                     "movec     %%d0,%%cacr\n"
                     "move%.l   %0,%%a0\n"      /* a0 = physical address */
                     "move%.l   %1,%%d0\n"      /* d0 = line count */
                     "1:\n"
                     "cpushl    %%bc,(%%a0)\n"  /* push only, no invalidate */
                     "lea       0x10(%%a0),%%a0\n"
                     "subq%.l   #1,%%d0\n"
                     "bne.b     1b\n"
                     "movec     %2,%%cacr\n"    /* restore normal CACR */
                     : : "a" (paddr), "r" (len),
                         "r" (shadow_cacr),
                         "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
                     : "a0", "d0");
}
190
/**
 * cf_cache_flush - Push dirty cache out and invalidate
 * @paddr: starting physical address
 * @len: number of bytes
 *
 * Push any dirty lines starting at paddr for len bytes and
 * invalidate those lines.  Only SPA is set, so CPUSHL performs its
 * default push + invalidate ("clear") on each line.  CACR is
 * restored from shadow_cacr afterwards.
 */
static inline void cf_cache_flush(unsigned long paddr, int len)
{
        /* number of lines */
        len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
        if (len == 0)
                return;

        /* align on set boundary */
        paddr &= 0xfffffff0;

        asm volatile("nop\n"
                     "move%.l   %2,%%d0\n"      /* d0 = shadow_cacr */
                     "or%.l     %3,%%d0\n"      /* enable SPA mode */
                     "movec     %%d0,%%cacr\n"
                     "move%.l   %0,%%a0\n"      /* a0 = physical address */
                     "move%.l   %1,%%d0\n"      /* d0 = line count */
                     "1:\n"
                     "cpushl    %%bc,(%%a0)\n"  /* push + invalidate line */
                     "lea       0x10(%%a0),%%a0\n"
                     "subq%.l   #1,%%d0\n"
                     "bne.b     1b\n"
                     "movec     %2,%%cacr\n"    /* restore normal CACR */
                     : : "a" (paddr), "r" (len),
                         "r" (shadow_cacr),
                         "i" (CF_CACR_SPA)
                     : "a0", "d0");
}
226
/**
 * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
 * @vstart: starting virtual address
 * @vend: ending virtual address
 *
 * Push any dirty data/instruction cache lines covering the range and
 * invalidate them, using physical-address search (SPA) mode.
 */
static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
{
        int len;

        /* align on set boundary */
        vstart &= 0xfffffff0;
        /*
         * NOTE(review): PAGE_ALIGN rounds vend up to a full page, so
         * this may flush up to a page more than requested (and makes
         * the trailing 0xfffffff0 mask redundant).  Confirm the
         * page-granular rounding is intentional before changing it.
         */
        vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
        len = vend - vstart;
        if (len == 0)
                return;
        vstart = __pa(vstart);
        vend = vstart + len;

        asm volatile("nop\n"
                     "move%.l   %2,%%d0\n"      /* d0 = shadow_cacr */
                     "or%.l     %3,%%d0\n"      /* enable SPA mode */
                     "movec     %%d0,%%cacr\n"
                     "move%.l   %0,%%a0\n"      /* a0 = first line (phys) */
                     "move%.l   %1,%%a1\n"      /* a1 = end address (phys) */
                     "1:\n"
                     "cpushl    %%bc,(%%a0)\n"  /* push + invalidate, both caches */
                     "lea       0x10(%%a0),%%a0\n"
                     "cmpa%.l   %%a0,%%a1\n"
                     "bne.b     1b\n"
                     "movec     %2,%%cacr\n"    /* restore normal CACR */
                     : /* no return */
                     : "a" (vstart), "a" (vend),
                       "r" (shadow_cacr),
                       "i" (CF_CACR_SPA)
                     : "a0", "a1", "d0");
}
266
267 /**
268  * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
269  * @vstart - starting virtual address
270  * @vend: ending virtual address
271  *
272  * Push the any dirty data lines starting at paddr for len bytes and
273  * invalidate those lines.
274  */
275 static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
276 {
277         /* align on set boundary */
278         vstart &= 0xfffffff0;
279         vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
280
281         asm volatile("nop\n"
282                      "move%.l   %2,%%d0\n"
283                      "or%.l     %3,%%d0\n"
284                      "movec     %%d0,%%cacr\n"
285                      "move%.l   %0,%%a0\n"
286                      "move%.l   %1,%%a1\n"
287                      "1:\n"
288                      "cpushl    %%dc,(%%a0)\n"
289                      "lea       0x10(%%a0),%%a0\n"
290                      "cmpa%.l   %%a0,%%a1\n"
291                      "bne.b     1b\n"
292                      "movec     %2,%%cacr\n"
293                      : /* no return */
294                      : "a" (__pa(vstart)), "a" (__pa(vend)),
295                        "r" (shadow_cacr),
296                        "i" (CF_CACR_SPA)
297                      : "a0", "a1", "d0");
298 }
299
300 /**
301  * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
302  * @vstart - starting virtual address
303  * @vend: ending virtual address
304  *
305  * Push the any dirty instr lines starting at paddr for len bytes and
306  * invalidate those lines.  This should just be an invalidate since you
307  * shouldn't be able to have dirty instruction cache.
308  */
309 static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
310 {
311         /* align on set boundary */
312         vstart &= 0xfffffff0;
313         vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
314
315         asm volatile("nop\n"
316                      "move%.l   %2,%%d0\n"
317                      "or%.l     %3,%%d0\n"
318                      "movec     %%d0,%%cacr\n"
319                      "move%.l   %0,%%a0\n"
320                      "move%.l   %1,%%a1\n"
321                      "1:\n"
322                      "cpushl    %%ic,(%%a0)\n"
323                      "lea       0x10(%%a0),%%a0\n"
324                      "cmpa%.l   %%a0,%%a1\n"
325                      "bne.b     1b\n"
326                      "movec     %2,%%cacr\n"
327                      : /* no return */
328                      : "a" (__pa(vstart)), "a" (__pa(vend)),
329                        "r" (shadow_cacr),
330                        "i" (CF_CACR_SPA)
331                      : "a0", "a1", "d0");
332 }
333
334 /**
335  * flush_cache_mm - Flush an mm_struct
336  * @mm: mm_struct to flush
337  */
338 static inline void flush_cache_mm(struct mm_struct *mm)
339 {
340         if (mm == current->mm)
341                 flush_bcache();
342 }
343
/* Duplicating an mm is handled exactly like flushing it. */
#define flush_cache_dup_mm(mm)  flush_cache_mm(mm)
345
346 /**
347  * flush_cache_range - Flush a cache range
348  * @vma: vma struct
349  * @start: Starting address
350  * @end: Ending address
351  *
352  * flush_cache_range must be a macro to avoid a dependency on
353  * linux/mm.h which includes this file.
354  */
355 static inline void flush_cache_range(struct vm_area_struct *vma,
356         unsigned long start, unsigned long end)
357 {
358         if (vma->vm_mm == current->mm)
359                 cf_cache_flush_range(start, end);
360 }
361
362 /**
363  * flush_cache_page - Flush a page of the cache
364  * @vma: vma struct
365  * @vmaddr:
366  * @pfn: page numer
367  *
368  * flush_cache_page must be a macro to avoid a dependency on
369  * linux/mm.h which includes this file.
370  */
371 static inline void flush_cache_page(struct vm_area_struct *vma,
372         unsigned long vmaddr, unsigned long pfn)
373 {
374         if (vma->vm_mm == current->mm)
375                 cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
376 }
377
/**
 * __flush_page_to_ram - Push a page out of the cache
 * @vaddr: Virtual address at start of page
 *
 * Push the page at kernel virtual address *vaddr* and clear
 * the icache: every line of the page is pushed and invalidated in
 * both caches (CPUSHL %bc) using physical-address search mode.
 */
static inline void __flush_page_to_ram(void *vaddr)
{
        asm volatile("nop\n"
                     "move%.l   %2,%%d0\n"      /* d0 = shadow_cacr */
                     "or%.l     %3,%%d0\n"      /* enable SPA mode */
                     "movec     %%d0,%%cacr\n"
                     "move%.l   %0,%%d0\n"      /* d0 = physical address */
                     "and%.l    #0xfffffff0,%%d0\n" /* align to line boundary */
                     "move%.l   %%d0,%%a0\n"
                     "move%.l   %1,%%d0\n"      /* d0 = lines per page */
                     "1:\n"
                     "cpushl    %%bc,(%%a0)\n"  /* push + invalidate, both caches */
                     "lea       0x10(%%a0),%%a0\n"
                     "subq%.l   #1,%%d0\n"
                     "bne.b     1b\n"
                     "movec     %2,%%cacr\n"    /* restore normal CACR */
                     : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
                         "r" (shadow_cacr), "i" (CF_CACR_SPA)
                     : "a0", "d0");
}
405
/*
 * Various defines for the kernel.
 */

/* Out-of-line cache helpers implemented elsewhere in arch/m68k. */
extern void cache_clear(unsigned long paddr, int len);
extern void cache_push(unsigned long paddr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

/* Whole-address-space operations fall back to a full both-cache flush. */
#define flush_cache_all()                       flush_bcache()
#define flush_cache_vmap(start, end)            flush_bcache()
#define flush_cache_vunmap(start, end)          flush_bcache()

#define flush_dcache_range(vstart, vend)        cf_dcache_flush_range(vstart, vend)
/* Page-granular flushes push + invalidate the page in both caches. */
#define flush_dcache_page(page)                 __flush_page_to_ram(page_address(page))
/* No per-mapping locking is needed for dcache flushing here. */
#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)

#define flush_icache_page(vma, page)            __flush_page_to_ram(page_address(page))
424
/**
 * copy_to_user_page - Copy memory to user page
 *
 * Copies @len bytes from @src to @dst, then pushes and invalidates
 * the whole page out of both caches so the new data reaches RAM and
 * stale instruction lines are dropped.  Note the entire page is
 * flushed regardless of @len.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
                                     struct page *page, unsigned long vaddr,
                                     void *dst, void *src, int len)
{
        memcpy(dst, src, len);
        cf_cache_flush(page_to_phys(page), PAGE_SIZE);
}
435
/**
 * copy_from_user_page - Copy memory from user page
 *
 * Pushes and invalidates the whole page out of both caches before
 * copying @len bytes from @src to @dst — presumably so the copy
 * observes the most recently written data (TODO confirm against
 * callers; the flush order is the mirror of copy_to_user_page()).
 */
static inline void copy_from_user_page(struct vm_area_struct *vma,
                                       struct page *page, unsigned long vaddr,
                                       void *dst, void *src, int len)
{
        cf_cache_flush(page_to_phys(page), PAGE_SIZE);
        memcpy(dst, src, len);
}
446
447 #endif /* M68K_CF_5445x_CACHEFLUSH_H */