/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

#ifdef CONFIG_COLDFIRE
#include <asm/cacheflush.h>
#endif

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
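
/*
 * Hypothetical userspace sketch (not part of this file): mmap2 takes its
 * last argument in 4 KiB units rather than bytes, so mapping one page of
 * a file at byte offset 64 KiB passes 16 as pgoff.  The fd and sizes used
 * here are assumptions for illustration only.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	void *map_at_64k(int fd)
 *	{
 *		// 64 KiB / 4 KiB = 16 pgoff units
 *		return (void *)syscall(__NR_mmap2, NULL, 4096,
 *				       PROT_READ, MAP_SHARED, fd,
 *				       (64 * 1024) / 4096);
 *	}
 */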

/* Convert virtual (user) address VADDR to physical address PADDR */
#ifndef CONFIG_COLDFIRE
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})
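
/*
 * Usage sketch (an assumption, for illustration): ptestr sets the 68040
 * MMUSR resident bit (MMU_R_040) only when the page is mapped, so a zero
 * result means "not currently resident" and callers must skip ahead or
 * retry, as cache_flush_040() below does.
 *
 *	unsigned long pa = virt_to_phys_040(addr);
 *	if (!pa)
 *		;	// page not mapped; advance to the next page
 */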

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

#endif /* CONFIG_COLDFIRE */

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

#ifndef CONFIG_COLDFIRE
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */

		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
#else /* CONFIG_COLDFIRE */
	if ((cache & FLUSH_CACHE_INSN) && (cache & FLUSH_CACHE_DATA))
		flush_bcache();
	else if (cache & FLUSH_CACHE_INSN)
		flush_icache();
	else
		flush_dcache();

	ret = 0;
#endif /* CONFIG_COLDFIRE */
out:
	return ret;
}
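
/*
 * Hypothetical userspace sketch (not part of this file): after patching
 * code at runtime -- e.g. in a JIT -- both caches must be flushed over
 * the patched range before jumping to it.  FLUSH_SCOPE_LINE and
 * FLUSH_CACHE_BOTH come from <asm/cachectl.h>; the helper name is an
 * assumption for illustration.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <asm/cachectl.h>
 *
 *	int sync_code(void *code, unsigned long len)
 *	{
 *		return syscall(__NR_cacheflush, (unsigned long)code,
 *			       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
 *	}
 */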

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
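
/*
 * The trap #0 convention used above (syscall number in %d0, arguments in
 * %d1-%d3, result back in %d0) is the same one userspace uses.  A
 * hypothetical standalone sketch of write(2) issued this way, for
 * illustration only:
 *
 *	#include <asm/unistd.h>
 *
 *	static long raw_write(int fd, const void *buf, unsigned long count)
 *	{
 *		register long res asm ("%d0") = __NR_write;
 *		register long a asm ("%d1") = fd;
 *		register long b asm ("%d2") = (long)buf;
 *		register long c asm ("%d3") = count;
 *		asm volatile ("trap #0" : "+d" (res)
 *			      : "d" (a), "d" (b), "d" (c));
 *		return res;	// negative errno on failure
 *	}
 */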

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
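
/*
 * Hypothetical userspace sketch (not part of this file): a threading
 * library would stash its TLS block pointer with set_thread_area and
 * read it back with get_thread_area.  The tls_block variable and helper
 * names are assumptions for illustration.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static char tls_block[256];
 *
 *	void tls_init(void)
 *	{
 *		syscall(__NR_set_thread_area, (unsigned long)tls_block);
 *	}
 *
 *	void *tls_self(void)
 *	{
 *		return (void *)syscall(__NR_get_thread_area);
 *	}
 */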

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	      bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access; we can get here if
		   the memory we're trying to write to should be
		   copied-on-write.  Make the kernel do the necessary page
		   handling, then retry.  Simulate a write access fault to
		   do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
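
/*
 * Hypothetical userspace sketch (not part of this file): because the
 * arguments travel in D1, D2 and A0 rather than the usual D1-D5 order,
 * plain syscall(2) cannot be used for this call; the registers must be
 * loaded explicitly.  __NR_atomic_cmpxchg_32 comes from <asm/unistd.h>;
 * everything else is an assumption for illustration.
 *
 *	#include <asm/unistd.h>
 *
 *	static int atomic_cmpxchg_32(volatile int *mem, int old, int new)
 *	{
 *		register long res asm ("%d0") = __NR_atomic_cmpxchg_32;
 *		register long nv asm ("%d1") = new;
 *		register long ov asm ("%d2") = old;
 *		register void *p asm ("%a0") = (void *)mem;
 *		asm volatile ("trap #0" : "+d" (res)
 *			      : "d" (nv), "d" (ov), "a" (p)
 *			      : "memory");
 *		return res;	// previous value of *mem
 *	}
 */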

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}