/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
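/* For reference: the flush paths in this file serialize through the
 * purge_tlb_start(flags)/purge_tlb_end(flags) helper pair, which is
 * assumed (see <asm/tlbflush.h>) to wrap spin_lock_irqsave()/
 * spin_unlock_irqrestore() on pa_tlb_lock.
 */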
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

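/* Deferred-flush handshake with flush_dcache_page(): when a dirty page
 * has no user mappings yet, flush_dcache_page() only sets
 * PG_dcache_dirty; the kernel-mapping flush is then performed here,
 * once the page is finally mapped into user space and this hook runs.
 */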
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        /* We don't have pte special.  As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with FireGL card in C8000.  */
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page_addr(pfn_va(pfn));
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024);
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size == 0) {
                seq_printf(m, "BTLB\t\t: not supported\n");
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_pad1);

        printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_pad1);
#endif

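        /* dt_conf.tc_sh appears to encode I/D TLB sharing: 0 means
         * separate (split) TLBs, nonzero means shared with the ITLB
         * (see show_cache_info above).  A value of 2 is unexpected here
         * and is treated conservatively as split.
         */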
        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
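        /* Why the two forms agree (a quick derivation; multiplying by a
         * power of two is a left shift):
         *      (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
         *        = cc_line << (4 + cc_shift + cc_block - 1)
         *        = cc_line << (3 + cc_block + cc_shift)
         * which is exactly CAFL_STRIDE below.
         */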
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits = 0;   /* stays 0 if PDC lacks the option */

        switch (boot_cpu_data.cpu_type) {
        case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2: /* pcxl2 doesn't support space register hashing */
                return;

        default: /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic; space_bits is
           pre-initialized to 0 so the check below stays well defined. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent */

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: the
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping and then flush
                 * the page along a special flush-only alias mapping.
                 * This guarantees that the page is no longer in the
                 * cache for any process, nor may it be speculatively
                 * read back in (until the user or kernel specifically
                 * accesses it, of course). */

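                /* Under the congruence rule above, every mapping of this
                 * page should share the same offset modulo SHMLBA, so the
                 * first flush suffices for all of them.  A mapping with a
                 * different offset is an inequivalent alias: flush it too
                 * and complain loudly below.
                 */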
                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
                                        old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

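        /* Break-even estimate: a range flush of len bytes costs roughly
         * (len / size) * rangetime cycles, while a whole-cache flush is a
         * flat alltime cycles.  Equating the two gives
         *      len = size * alltime / rangetime,
         * so ranges shorter than this threshold are cheaper to flush line
         * by line, and longer ones by flushing the whole cache.
         */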
        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;

        parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;

        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;

        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
                parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

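/* Flush the kernel mapping of a page, then purge its kernel TLB entry.
 * Per the "TLB is the engine of coherence" rule above, dropping the
 * translation is what prevents the page from being speculatively pulled
 * back into the cache through the kernel mapping.
 */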
void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
        clear_page_asm(vto);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping.  No coherency is needed
           (all in kmap/kunmap) on machines that don't support
           non-equivalent aliasing.  However, the `from' page
           needs to be flushed before it can be accessed through
           the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
        if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        /* Note: purge_tlb_entries can be called at startup with
           no context.  */

        purge_tlb_start(flags);
        mtsp(mm->context, 1);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
                       unsigned long end)
{
        unsigned long npages;

        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
                flush_tlb_all();
        else {
                unsigned long flags;

                purge_tlb_start(flags);
                mtsp(sid, 1);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
                purge_tlb_end(flags);
        }
}

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

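/* Minimal software walk of the page tables (pgd -> pud -> pmd -> pte),
 * returning NULL as soon as any level is empty.  Callers in this file
 * only read the returned pte, so no page-table lock is taken here.
 */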
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        pgd_t *pgd;

        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
        if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

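        /* mfsp(3) reads space register %sr3, which is assumed to hold the
         * space id of the currently-running user context.  If this mm is
         * the live one, its user addresses are directly reachable and can
         * be flushed with the user-range primitives; otherwise fall
         * through and flush page by page via the page tables.
         */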
        if (mm->context == mfsp(3)) {
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
                        if ((vma->vm_flags & VM_EXEC) == 0)
                                continue;
                        flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
                }
                return;
        }

        pgd = mm->pgd;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long addr;

                for (addr = vma->vm_start; addr < vma->vm_end;
                     addr += PAGE_SIZE) {
                        unsigned long pfn;
                        pte_t *ptep = get_ptep(pgd, addr);
                        if (!ptep)
                                continue;
                        pfn = pte_pfn(*ptep);
                        if (!pfn_valid(pfn))
                                continue;
                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                }
        }
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        unsigned long addr;
        pgd_t *pgd;

        BUG_ON(!vma->vm_mm->context);

        if ((end - start) >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

        if (vma->vm_mm->context == mfsp(3)) {
                flush_user_dcache_range_asm(start, end);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_range_asm(start, end);
                return;
        }

        pgd = vma->vm_mm->pgd;
        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long pfn;
                pte_t *ptep = get_ptep(pgd, addr);
                if (!ptep)
                        continue;
                pfn = pte_pfn(*ptep);
                if (pfn_valid(pfn))
                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
        }
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        if (pfn_valid(pfn)) {
                flush_tlb_page(vma, vmaddr);
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *vto;
        unsigned long flags;

        /* Clear using TMPALIAS region.  The page doesn't need to
           be flushed but the kernel mapping needs to be purged.  */

        vto = kmap_atomic(page);

        /* The PA-RISC 2.0 Architecture book states on page F-6:
           "Before a write-capable translation is enabled, *all*
           non-equivalently-aliased translations must be removed
           from the page table and purged from the TLB.  (Note
           that the caches are not required to be flushed at this
           time.)  Before any non-equivalent aliased translation
           is re-enabled, the virtual address range for the writeable
           page (the entire page) must be flushed from the cache,
           and the write-capable translation removed from the page
           table and purged from the TLB."  */

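        /* The sequence below follows that rule: purge the kernel-mapping
         * cache lines, drop the kernel TLB entry, and only then write the
         * page through the TMPALIAS mapping, which is congruent with the
         * user address and therefore equivalently aliased.
         */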
        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        purge_tlb_end(flags);
        preempt_disable();
        clear_user_page_asm(vto, vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;
        unsigned long flags;

        /* Copy using TMPALIAS region.  This has the advantage
           that the `from' page doesn't need to be flushed.  However,
           the `to' page must be flushed in copy_user_page_asm since
           it can be used to bring in executable code.  */

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        pdtlb_kernel(vfrom);
        purge_tlb_end(flags);
        preempt_disable();
        copy_user_page_asm(vto, vfrom, vaddr);
        flush_dcache_page_asm(__pa(vto), vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER1); */
        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */