/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

#ifdef CONFIG_COLDFIRE
#include <asm/cacheflush.h>
#endif

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
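
/*
 * Worked example of the unit convention above (a sketch, not part of the
 * original file): to map a file at byte offset 1 MiB, libc's mmap64()
 * passes the last argument in 4Kb units, i.e. (1024 * 1024) >> 12 = 256,
 * where 'length' and 'fd' are the caller's values:
 *
 *	void *p = (void *)syscall(__NR_mmap2, 0, length, PROT_READ,
 *				  MAP_SHARED, fd, (1024 * 1024) >> 12);
 */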

/* Convert virtual (user) address VADDR to physical address PADDR */
#ifndef CONFIG_COLDFIRE
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
								\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr)			\
			      : "a" (vaddr));			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
					      "cpusha %dc\n\t.chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
					      "cpusha %ic\n\t.chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
					      "cpusha %bc\n\t.chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	paddr;						\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t.chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t.chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t.chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t.chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#endif /* CONFIG_COLDFIRE */

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

#ifndef CONFIG_COLDFIRE
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
#else /* CONFIG_COLDFIRE */
	if ((cache & FLUSH_CACHE_INSN) && (cache & FLUSH_CACHE_DATA))
		flush_bcache();
	else if (cache & FLUSH_CACHE_INSN)
		flush_icache();
	else
		flush_dcache();
	ret = 0;
#endif /* CONFIG_COLDFIRE */
out:
	return ret;
}
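
/*
 * Userspace usage sketch (an assumed typical caller, not part of this
 * file): code that has just written instructions into a buffer, e.g. a
 * JIT, must push the data cache and invalidate the instruction cache
 * over that range before jumping into it:
 *
 *	#include <asm/cachectl.h>
 *
 *	if (syscall(__NR_cacheflush, (unsigned long)buf,
 *		    FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, buflen) < 0)
 *		perror("cacheflush");
 */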

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
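
/*
 * Usage sketch (illustrative, not part of this file): tp_value is the
 * thread pointer used for TLS. A thread library stores it once with
 * set_thread_area and reads it back with the matching call:
 *
 *	unsigned long tp = syscall(__NR_get_thread_area);
 */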

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
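
/*
 * Userspace sketch of the register convention described above
 * (illustrative, not part of this file): newval in d1, oldval in d2,
 * the target address in a0, syscall number in d0; d0 returns the value
 * previously in *mem, so the exchange succeeded iff the result equals
 * oldval:
 *
 *	register long d0 asm ("%d0") = __NR_atomic_cmpxchg_32;
 *	register long d1 asm ("%d1") = newval;
 *	register long d2 asm ("%d2") = oldval;
 *	register volatile int *a0 asm ("%a0") = mem;
 *	asm volatile ("trap #0"
 *		      : "+d" (d0)
 *		      : "d" (d1), "d" (d2), "a" (a0)
 *		      : "memory");
 */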

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}