rtime.felk.cvut.cz Git - mcf548x/linux.git/commitdiff
Headers cleanup
author Martin <meloumar@cmp.felk.cvut.cz>
Tue, 3 May 2011 16:11:48 +0000 (18:11 +0200)
committer Martin <meloumar@cmp.felk.cvut.cz>
Tue, 3 May 2011 16:11:48 +0000 (18:11 +0200)
arch/m68k/coldfire/head.S
arch/m68k/coldfire/muldi3.S [new file with mode: 0644]
arch/m68k/coldfire/signal.c [new file with mode: 0644]
arch/m68k/include/asm/cache.h
arch/m68k/include/asm/cacheflush.h
arch/m68k/include/asm/entry_mm.h
arch/m68k/include/asm/mcf_5445x_cacheflush.h [new file with mode: 0644]
arch/m68k/include/asm/mcf_cacheflush_m547x_8x.h [new file with mode: 0644]
arch/m68k/include/asm/mcfcache.h
arch/m68k/include/asm/mcfmmu.h [new file with mode: 0644]

index e1b53214f679a0591dcf627402c377838597bfe1..f3d053cc1592e4483a47ea93be516ed3ad332d42 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/page.h>
 #include <asm/coldfire.h>
 #include <asm/mcfuart.h>
-#include <asm/mcfcache.h>
+#include <asm/cache.h>
 #include <asm/thread_info.h>
 
 #define DEBUG
diff --git a/arch/m68k/coldfire/muldi3.S b/arch/m68k/coldfire/muldi3.S
new file mode 100644 (file)
index 0000000..90659c8
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Coldfire muldi3 assembly version
+ */
+#include <linux/linkage.h>
+.globl __muldi3
+
+ENTRY(__muldi3)
+       linkw   %fp,#0
+       lea     %sp@(-32),%sp
+       moveml  %d2-%d7/%a2-%a3,%sp@
+       moveal  %fp@(8), %a2
+       moveal  %fp@(12), %a3
+       moveal  %fp@(16), %a0
+       moveal  %fp@(20),%a1
+       movel   %a3,%d2
+       andil   #65535,%d2
+       movel   %a3,%d3
+       clrw    %d3
+       swap    %d3
+       movel   %a1,%d0
+       andil   #65535,%d0
+       movel   %a1,%d1
+       clrw    %d1
+       swap    %d1
+       movel   %d2,%d7
+       mulsl   %d0,%d7
+       movel   %d2,%d4
+       mulsl   %d1,%d4
+       movel   %d3,%d2
+       mulsl   %d0,%d2
+       mulsl   %d1,%d3
+       movel   %d7,%d0
+       clrw    %d0
+       swap    %d0
+       addl    %d0,%d4
+       addl    %d2,%d4
+       cmpl    %d4,%d2
+       blss    1f
+       addil   #65536,%d3
+1:
+       movel   %d4,%d0
+       clrw    %d0
+       swap    %d0
+       movel   %d3,%d5
+       addl    %d0,%d5
+       movew   %d4,%d6
+       swap    %d6
+       movew   %d7,%d6
+       movel   %d5,%d0
+       movel   %d6,%d1
+       movel   %a3,%d2
+       movel   %a0,%d3
+       mulsl   %d3,%d2
+       movel   %a2,%d3
+       movel   %a1,%d4
+       mulsl   %d4,%d3
+       addl    %d3,%d2
+       movel   %d2,%d0
+       addl    %d5,%d0
+       moveml  %sp@, %d2-%d7/%a2-%a3
+       lea     %sp@(32),%sp
+       unlk    %fp
+       rts
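
For reference, the routine above builds a 64x64-bit product out of 32-bit mulsl results, since ColdFire's mulsl only yields the low 32 bits. A C sketch of the same partial-product scheme (umul32to64 and muldi3_model are illustrative names, not part of the patch; assumes 32-bit unsigned long, as on ColdFire):

/* Hypothetical C equivalent of the assembly above: the low words are
 * multiplied via 16-bit partial products, and the cross terms only
 * affect the high 32 bits of the result. */
static unsigned long long umul32to64(unsigned long a, unsigned long b)
{
	unsigned long al = a & 0xffff, ah = a >> 16;
	unsigned long bl = b & 0xffff, bh = b >> 16;
	unsigned long p0 = al * bl;	/* bits  0..31 */
	unsigned long p1 = al * bh;	/* bits 16..47 */
	unsigned long p2 = ah * bl;	/* bits 16..47 */
	unsigned long p3 = ah * bh;	/* bits 32..63 */
	unsigned long mid = p1 + p2;
	unsigned long lo;

	if (mid < p1)		/* carry out of p1 + p2 is worth 2^48 */
		p3 += 0x10000;
	lo = p0 + (mid << 16);
	if (lo < p0)		/* carry into the high word */
		p3++;
	return ((unsigned long long)(p3 + (mid >> 16)) << 32) | lo;
}

unsigned long long muldi3_model(unsigned long long u, unsigned long long v)
{
	unsigned long uh = u >> 32, ul = (unsigned long)u;
	unsigned long vh = v >> 32, vl = (unsigned long)v;

	return umul32to64(ul, vl)
		+ ((unsigned long long)(uh * vl + ul * vh) << 32);
}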
diff --git a/arch/m68k/coldfire/signal.c b/arch/m68k/coldfire/signal.c
new file mode 100644 (file)
index 0000000..38671c4
--- /dev/null
@@ -0,0 +1,871 @@
+/*
+ *  linux/arch/m68k/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Derived from m68k/kernel/signal.c and the original authors are credited
+ * there.
+ *
+ * Coldfire support by:
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Copyright Freescale Semiconductor, Inc 2007
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/cf_uaccess.h>
+#include <asm/cf_pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+
+const int frame_extra_sizes[16] = {
+  [1]  = -1,
+  [2]  = sizeof(((struct frame *)0)->un.fmt2),
+  [3]  = sizeof(((struct frame *)0)->un.fmt3),
+  [4]  = 0,
+  [5]  = -1,
+  [6]  = -1,
+  [7]  = sizeof(((struct frame *)0)->un.fmt7),
+  [8]  = -1,
+  [9]  = sizeof(((struct frame *)0)->un.fmt9),
+  [10] = sizeof(((struct frame *)0)->un.fmta),
+  [11] = sizeof(((struct frame *)0)->un.fmtb),
+  [12] = -1,
+  [13] = -1,
+  [14] = -1,
+  [15] = -1,
+};
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
+{
+       old_sigset_t mask = regs->d3;
+       sigset_t saveset;
+
+       mask &= _BLOCKABLE;
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       siginitset(&current->blocked, mask);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->d0 = -EINTR;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(&saveset, regs))
+                       return -EINTR;
+       }
+}
+
+asmlinkage int
+do_rt_sigsuspend(struct pt_regs *regs)
+{
+       sigset_t __user *unewset = (sigset_t __user *)regs->d1;
+       size_t sigsetsize = (size_t)regs->d2;
+       sigset_t saveset, newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, unewset, sizeof(newset)))
+               return -EFAULT;
+       sigdelsetmask(&newset, ~_BLOCKABLE);
+
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->d0 = -EINTR;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(&saveset, regs))
+                       return -EINTR;
+       }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+             struct old_sigaction __user *oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+       if (act) {
+               old_sigset_t mask;
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                       return -EFAULT;
+               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+               __get_user(mask, &act->sa_mask);
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                       return -EFAULT;
+               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+       }
+
+       return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+{
+       return do_sigaltstack(uss, uoss, rdusp());
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+       char __user *pretcode;
+       int sig;
+       int code;
+       struct sigcontext __user *psc;
+       char retcode[16];
+       unsigned long extramask[_NSIG_WORDS-1];
+       struct sigcontext sc;
+};
+
+struct rt_sigframe
+{
+       char __user *pretcode;
+       int sig;
+       struct siginfo __user *pinfo;
+       void __user *puc;
+       char retcode[16];
+       struct siginfo info;
+       struct ucontext uc;
+};
+
+#define FPCONTEXT_SIZE 216
+#define uc_fpstate     uc_filler[0]
+#define uc_formatvec   uc_filler[FPCONTEXT_SIZE/4]
+#define uc_extra       uc_filler[FPCONTEXT_SIZE/4+1]
+
+#ifdef CONFIG_FPU
+static unsigned char fpu_version; /* version num of fpu, set by setup_frame */
+
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+       int err = 1;
+
+       if (FPU_IS_EMU) {
+           /* restore registers */
+           memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
+           memcpy(current->thread.fp, sc->sc_fpregs, 24);
+           return 0;
+       }
+
+       if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+           /* Verify the frame format.  */
+           if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+               goto out;
+           if (CPU_IS_020_OR_030) {
+               if (m68k_fputype & FPU_68881 &&
+                   !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
+                   goto out;
+               if (m68k_fputype & FPU_68882 &&
+                   !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
+                   goto out;
+           } else if (CPU_IS_040) {
+               if (!(sc->sc_fpstate[1] == 0x00 ||
+                     sc->sc_fpstate[1] == 0x28 ||
+                     sc->sc_fpstate[1] == 0x60))
+                   goto out;
+           } else if (CPU_IS_060) {
+               if (!(sc->sc_fpstate[3] == 0x00 ||
+                     sc->sc_fpstate[3] == 0x60 ||
+                     sc->sc_fpstate[3] == 0xe0))
+                   goto out;
+           } else
+               goto out;
+
+       }
+       err = 0;
+
+out:
+       return err;
+}
+
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
+{
+       unsigned char fpstate[FPCONTEXT_SIZE];
+       int context_size = CPU_IS_060 ? 8 : 0;
+       fpregset_t fpregs;
+       int err = 1;
+
+       if (FPU_IS_EMU) {
+               /* restore fpu control register */
+               if (__copy_from_user(current->thread.fpcntl,
+                               uc->uc_mcontext.fpregs.f_fpcntl, 12))
+                       goto out;
+               /* restore all other fpu registers */
+               if (__copy_from_user(current->thread.fp,
+                               uc->uc_mcontext.fpregs.f_fpregs, 96))
+                       goto out;
+               return 0;
+       }
+
+       if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
+               goto out;
+       if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+               if (!CPU_IS_060)
+                       context_size = fpstate[1];
+               /* Verify the frame format.  */
+               if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+                       goto out;
+               if (CPU_IS_020_OR_030) {
+                       if (m68k_fputype & FPU_68881 &&
+                           !(context_size == 0x18 || context_size == 0xb4))
+                               goto out;
+                       if (m68k_fputype & FPU_68882 &&
+                           !(context_size == 0x38 || context_size == 0xd4))
+                               goto out;
+               } else if (CPU_IS_040) {
+                       if (!(context_size == 0x00 ||
+                             context_size == 0x28 ||
+                             context_size == 0x60))
+                               goto out;
+               } else if (CPU_IS_060) {
+                       if (!(fpstate[3] == 0x00 ||
+                             fpstate[3] == 0x60 ||
+                             fpstate[3] == 0xe0))
+                               goto out;
+               } else
+                       goto out;
+               if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
+                                    sizeof(fpregs)))
+                       goto out;
+       }
+       if (context_size &&
+           __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
+                            context_size))
+               goto out;
+       err = 0;
+
+out:
+       return err;
+}
+#endif
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc,
+       void __user *fp, int *pd0)
+{
+       int fsize, formatvec;
+       struct sigcontext context;
+       int err = 0;
+
+       /* get previous context */
+       if (copy_from_user(&context, usc, sizeof(context)))
+               goto badframe;
+
+       /* restore passed registers */
+       regs->d1 = context.sc_d1;
+       regs->a0 = context.sc_a0;
+       regs->a1 = context.sc_a1;
+       regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+       regs->pc = context.sc_pc;
+       regs->orig_d0 = -1;             /* disable syscall checks */
+       wrusp(context.sc_usp);
+       formatvec = context.sc_formatvec;
+       regs->format = formatvec >> 12;
+       regs->vector = formatvec & 0xfff;
+
+#ifdef CONFIG_FPU
+       err = restore_fpu_state(&context);
+#endif
+
+       fsize = frame_extra_sizes[regs->format];
+       if (fsize < 0) {
+               /*
+                * user process trying to return with weird frame format
+                */
+#ifdef DEBUG
+               printk(KERN_DEBUG
+                       "user process returning with weird frame format\n");
+#endif
+               goto badframe;
+       }
+
+       /* OK.  Make room on the supervisor stack for the extra junk,
+        * if necessary.
+        */
+
+       {
+               struct switch_stack *sw = (struct switch_stack *)regs - 1;
+               regs->d0 = context.sc_d0;
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+               __asm__ __volatile__
+                       ("   movel %0,%/sp\n\t"
+                        "   bra ret_from_signal\n"
+                        "4:\n"
+                        ".section __ex_table,\"a\"\n"
+                        "   .align 4\n"
+                        "   .long 2b,4b\n"
+                        ".previous"
+                        : /* no outputs, it doesn't ever return */
+                        : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+                          "n" (frame_offset), "a" (fp)
+                        : "a0");
+#undef frame_offset
+               /*
+                * If we ever get here an exception occurred while
+                * building the above stack-frame.
+                */
+               goto badframe;
+       }
+
+       *pd0 = context.sc_d0;
+       return err;
+
+badframe:
+       return 1;
+}
+
+static inline int
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
+                   struct ucontext __user *uc, int *pd0)
+{
+       int fsize, temp;
+       greg_t __user *gregs = uc->uc_mcontext.gregs;
+       unsigned long usp;
+       int err;
+
+       err = __get_user(temp, &uc->uc_mcontext.version);
+       if (temp != MCONTEXT_VERSION)
+               goto badframe;
+       /* restore passed registers */
+       err |= __get_user(regs->d0, &gregs[0]);
+       err |= __get_user(regs->d1, &gregs[1]);
+       err |= __get_user(regs->d2, &gregs[2]);
+       err |= __get_user(regs->d3, &gregs[3]);
+       err |= __get_user(regs->d4, &gregs[4]);
+       err |= __get_user(regs->d5, &gregs[5]);
+       err |= __get_user(sw->d6, &gregs[6]);
+       err |= __get_user(sw->d7, &gregs[7]);
+       err |= __get_user(regs->a0, &gregs[8]);
+       err |= __get_user(regs->a1, &gregs[9]);
+       err |= __get_user(regs->a2, &gregs[10]);
+       err |= __get_user(sw->a3, &gregs[11]);
+       err |= __get_user(sw->a4, &gregs[12]);
+       err |= __get_user(sw->a5, &gregs[13]);
+       err |= __get_user(sw->a6, &gregs[14]);
+       err |= __get_user(usp, &gregs[15]);
+       wrusp(usp);
+       err |= __get_user(regs->pc, &gregs[16]);
+       err |= __get_user(temp, &gregs[17]);
+       regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
+       regs->orig_d0 = -1;             /* disable syscall checks */
+       err |= __get_user(temp, &uc->uc_formatvec);
+       regs->format = temp >> 12;
+       regs->vector = temp & 0xfff;
+
+#ifdef CONFIG_FPU
+       err |= rt_restore_fpu_state(uc);
+#endif
+
+       if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
+               goto badframe;
+
+       fsize = frame_extra_sizes[regs->format];
+       if (fsize < 0) {
+               /*
+                * user process trying to return with weird frame format
+                */
+#ifdef DEBUG
+               printk(KERN_DEBUG
+                       "user process returning with weird frame format\n");
+#endif
+               goto badframe;
+       }
+
+       /* OK.  Make room on the supervisor stack for the extra junk,
+        * if necessary.
+        */
+
+       {
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+               __asm__ __volatile__
+                       ("   movel %0,%/sp\n\t"
+                        "   bra ret_from_signal\n"
+                        "4:\n"
+                        ".section __ex_table,\"a\"\n"
+                        "   .align 4\n"
+                        "   .long 2b,4b\n"
+                        ".previous"
+                        : /* no outputs, it doesn't ever return */
+                        : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+                          "n" (frame_offset), "a" (&uc->uc_extra)
+                        : "a0");
+#undef frame_offset
+               /*
+                * If we ever get here an exception occurred while
+                * building the above stack-frame.
+                */
+               goto badframe;
+       }
+
+       *pd0 = regs->d0;
+       return err;
+
+badframe:
+       return 1;
+}
+
+asmlinkage int do_sigreturn(unsigned long __unused)
+{
+       struct switch_stack *sw = (struct switch_stack *) &__unused;
+       struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+       unsigned long usp = rdusp();
+       struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
+       sigset_t set;
+       int d0;
+
+       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+           (_NSIG_WORDS > 1 &&
+            __copy_from_user(&set.sig[1], &frame->extramask,
+                             sizeof(frame->extramask))))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
+               goto badframe;
+       return d0;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
+{
+       struct switch_stack *sw = (struct switch_stack *) &__unused;
+       struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+       unsigned long usp = rdusp();
+       struct rt_sigframe __user *frame =
+               (struct rt_sigframe __user *)(usp - 4);
+       sigset_t set;
+       int d0;
+
+       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
+               goto badframe;
+       return d0;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
+
+#ifdef CONFIG_FPU
+/*
+ * Set up a signal frame.
+ */
+
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+       if (FPU_IS_EMU) {
+               /* save registers */
+               memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
+               memcpy(sc->sc_fpregs, current->thread.fp, 24);
+               return;
+       }
+}
+
+static inline int rt_save_fpu_state(struct ucontext __user *uc,
+       struct pt_regs *regs)
+{
+       int err = 0;
+
+       if (FPU_IS_EMU) {
+               /* save fpu control register */
+               err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
+                               current->thread.fpcntl, 12);
+               /* save all other fpu registers */
+               err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
+                               current->thread.fp, 96);
+               return err;
+       }
+
+       return err;
+}
+#endif
+
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+                            unsigned long mask)
+{
+       sc->sc_mask = mask;
+       sc->sc_usp = rdusp();
+       sc->sc_d0 = regs->d0;
+       sc->sc_d1 = regs->d1;
+       sc->sc_a0 = regs->a0;
+       sc->sc_a1 = regs->a1;
+       sc->sc_sr = regs->sr;
+       sc->sc_pc = regs->pc;
+       sc->sc_formatvec = regs->format << 12 | regs->vector;
+#ifdef CONFIG_FPU
+       save_fpu_state(sc, regs);
+#endif
+}
+
+static inline int rt_setup_ucontext(struct ucontext __user *uc,
+       struct pt_regs *regs)
+{
+       struct switch_stack *sw = (struct switch_stack *)regs - 1;
+       greg_t __user *gregs = uc->uc_mcontext.gregs;
+       int err = 0;
+
+       err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+       err |= __put_user(regs->d0, &gregs[0]);
+       err |= __put_user(regs->d1, &gregs[1]);
+       err |= __put_user(regs->d2, &gregs[2]);
+       err |= __put_user(regs->d3, &gregs[3]);
+       err |= __put_user(regs->d4, &gregs[4]);
+       err |= __put_user(regs->d5, &gregs[5]);
+       err |= __put_user(sw->d6, &gregs[6]);
+       err |= __put_user(sw->d7, &gregs[7]);
+       err |= __put_user(regs->a0, &gregs[8]);
+       err |= __put_user(regs->a1, &gregs[9]);
+       err |= __put_user(regs->a2, &gregs[10]);
+       err |= __put_user(sw->a3, &gregs[11]);
+       err |= __put_user(sw->a4, &gregs[12]);
+       err |= __put_user(sw->a5, &gregs[13]);
+       err |= __put_user(sw->a6, &gregs[14]);
+       err |= __put_user(rdusp(), &gregs[15]);
+       err |= __put_user(regs->pc, &gregs[16]);
+       err |= __put_user(regs->sr, &gregs[17]);
+       err |= __put_user((regs->format << 12) | regs->vector,
+                         &uc->uc_formatvec);
+#ifdef CONFIG_FPU
+       err |= rt_save_fpu_state(uc, regs);
+#endif
+       return err;
+}
+
+static inline void push_cache(unsigned long vaddr)
+{
+#if 0
+/* JKM -- need to add into the old cpushl cache stuff */
+       cf_cache_push(__pa(vaddr), 8);
+#endif
+}
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+       unsigned long usp;
+
+       /* Default to using normal stack.  */
+       usp = rdusp();
+
+       /* This is the X/Open sanctioned signal stack switching.  */
+       if (ka->sa.sa_flags & SA_ONSTACK) {
+               if (!sas_ss_flags(usp))
+                       usp = current->sas_ss_sp + current->sas_ss_size;
+       }
+       return (void __user *)((usp - frame_size) & -8UL);
+}
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+                        sigset_t *set, struct pt_regs *regs)
+{
+       struct sigframe __user *frame;
+       int fsize = frame_extra_sizes[regs->format];
+       struct sigcontext context;
+       int err = 0;
+
+       if (fsize < 0) {
+#ifdef DEBUG
+               printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
+                       regs->format);
+#endif
+               goto give_sigsegv;
+       }
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       err |= __put_user((current_thread_info()->exec_domain
+                       && current_thread_info()->exec_domain->signal_invmap
+                       && sig < 32
+                       ? current_thread_info()->exec_domain->signal_invmap[sig]
+                       : sig),
+                       &frame->sig);
+
+       err |= __put_user(regs->vector, &frame->code);
+       err |= __put_user(&frame->sc, &frame->psc);
+
+       if (_NSIG_WORDS > 1)
+               err |= copy_to_user(frame->extramask, &set->sig[1],
+                                   sizeof(frame->extramask));
+
+       setup_sigcontext(&context, regs, set->sig[0]);
+       err |= copy_to_user(&frame->sc, &context, sizeof(context));
+
+       /* Set up to return from userspace.  */
+       err |= __put_user(frame->retcode, &frame->pretcode);
+       /* moveq #,d0; trap #0 */
+       err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
+                         (long __user *)(frame->retcode));
+
+       if (err)
+               goto give_sigsegv;
+
+       push_cache((unsigned long) &frame->retcode);
+
+       /* Set up registers for signal handler */
+       wrusp((unsigned long) frame);
+       regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+       /* Prepare to skip over the extra stuff in the exception frame.  */
+       if (regs->stkadj) {
+               struct pt_regs *tregs =
+                       (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+               printk(KERN_DEBUG "Performing stackadjust=%04x\n",
+                       regs->stkadj);
+#endif
+               /* This must be copied with decreasing addresses to
+                  handle overlaps.  */
+               tregs->vector = 0;
+               tregs->format = 0;
+               tregs->pc = regs->pc;
+               tregs->sr = regs->sr;
+       }
+       return;
+
+give_sigsegv:
+       force_sigsegv(sig, current);
+       goto adjust_stack;
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+       sigset_t *set, struct pt_regs *regs)
+{
+       struct rt_sigframe __user *frame;
+       int fsize = frame_extra_sizes[regs->format];
+       int err = 0;
+
+       if (fsize < 0) {
+#ifdef DEBUG
+               printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
+                       regs->format);
+#endif
+               goto give_sigsegv;
+       }
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       if (fsize) {
+               err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);
+               regs->stkadj = fsize;
+       }
+
+       err |= __put_user((current_thread_info()->exec_domain
+                       && current_thread_info()->exec_domain->signal_invmap
+                       && sig < 32
+                       ? current_thread_info()->exec_domain->signal_invmap[sig]
+                       : sig),
+                       &frame->sig);
+       err |= __put_user(&frame->info, &frame->pinfo);
+       err |= __put_user(&frame->uc, &frame->puc);
+       err |= copy_siginfo_to_user(&frame->info, info);
+
+       /* Create the ucontext.  */
+       err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(NULL, &frame->uc.uc_link);
+       err |= __put_user((void __user *)current->sas_ss_sp,
+                         &frame->uc.uc_stack.ss_sp);
+       err |= __put_user(sas_ss_flags(rdusp()),
+                         &frame->uc.uc_stack.ss_flags);
+       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+       err |= rt_setup_ucontext(&frame->uc, regs);
+       err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+       /* Set up to return from userspace.  */
+       err |= __put_user(frame->retcode, &frame->pretcode);
+
+       /* moveq #,d0; andi.l #,D0; trap #0 */
+       err |= __put_user(0x70AD0280, (long *)(frame->retcode + 0));
+       err |= __put_user(0x000000ff, (long *)(frame->retcode + 4));
+       err |= __put_user(0x4e400000, (long *)(frame->retcode + 8));
+
+       if (err)
+               goto give_sigsegv;
+
+       push_cache((unsigned long) &frame->retcode);
+
+       /* Set up registers for signal handler */
+       wrusp((unsigned long) frame);
+       regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+       /* Prepare to skip over the extra stuff in the exception frame.  */
+       if (regs->stkadj) {
+               struct pt_regs *tregs =
+                       (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+               printk(KERN_DEBUG "Performing stackadjust=%04x\n",
+                       regs->stkadj);
+#endif
+               /* This must be copied with decreasing addresses to
+                  handle overlaps.  */
+               tregs->vector = 0;
+               tregs->format = 0;
+               tregs->pc = regs->pc;
+               tregs->sr = regs->sr;
+       }
+       return;
+
+give_sigsegv:
+       force_sigsegv(sig, current);
+       goto adjust_stack;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+       switch (regs->d0) {
+       case -ERESTARTNOHAND:
+               if (!has_handler)
+                       goto do_restart;
+               regs->d0 = -EINTR;
+               break;
+
+       case -ERESTARTSYS:
+               if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+                       regs->d0 = -EINTR;
+                       break;
+               }
+       /* fallthrough */
+       case -ERESTARTNOINTR:
+do_restart:
+               regs->d0 = regs->orig_d0;
+               regs->pc -= 2;
+               break;
+       }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+             sigset_t *oldset, struct pt_regs *regs)
+{
+       /* are we from a system call? */
+       if (regs->orig_d0 >= 0)
+               /* If so, check system call restarting.. */
+               handle_restart(regs, ka, 1);
+
+       /* set up the stack frame */
+       if (ka->sa.sa_flags & SA_SIGINFO)
+               setup_rt_frame(sig, ka, info, oldset, regs);
+       else
+               setup_frame(sig, ka, oldset, regs);
+
+       if (ka->sa.sa_flags & SA_ONESHOT)
+               ka->sa.sa_handler = SIG_DFL;
+
+       spin_lock_irq(&current->sighand->siglock);
+       sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+       if (!(ka->sa.sa_flags & SA_NODEFER))
+               sigaddset(&current->blocked, sig);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+       siginfo_t info;
+       struct k_sigaction ka;
+       int signr;
+
+       current->thread.esp0 = (unsigned long) regs;
+
+       if (!oldset)
+               oldset = &current->blocked;
+
+       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+       if (signr > 0) {
+               /* Whee!  Actually deliver the signal.  */
+               handle_signal(signr, &ka, &info, oldset, regs);
+               return 1;
+       }
+
+       /* Did we come from a system call? */
+       if (regs->orig_d0 >= 0)
+               /* Restart the system call - no handlers present */
+               handle_restart(regs, NULL, 0);
+
+       return 0;
+}
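
One detail of the frames set up above worth spelling out: the retcode trampolines are stored as raw instruction words. setup_frame() packs moveq #__NR_sigreturn,%d0 and trap #0 into a single long, while setup_rt_frame() adds an andi.l #0xff,%d0 because moveq sign-extends 0xad (__NR_rt_sigreturn). A small check of those encodings, assuming the classic m68k syscall numbers 119 and 173; this sketch is illustrative, not part of the patch:

/* Decode the trampoline constants used in setup_frame()/setup_rt_frame(). */
#include <assert.h>

int main(void)
{
	unsigned long sig_tramp = 0x70004e40UL + (119UL << 16);

	/* high word: moveq #__NR_sigreturn,%d0 == 0x7000 | imm8 */
	assert((sig_tramp >> 16) == (0x7000UL | 119));
	/* low word: trap #0 == 0x4e40 */
	assert((sig_tramp & 0xffff) == 0x4e40);

	/* rt frame: moveq #0xad,%d0 sign-extends 173 to -83, so the
	 * trampoline masks it back with andi.l #0xff,%d0 (0x0280). */
	assert((0x70ad0280UL >> 16) == (0x7000UL | 173));
	assert((0x70ad0280UL & 0xffff) == 0x0280);
	return 0;
}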
index 18963a7210894977bec7c8fd86966e9466a025c2..04a43ece7755542d5b8c8077cf21a3ab62970eb2 100644 (file)
 
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
-#ifdef CONFIG_COLDFIRE
-
-# ifdef CONFIG_M547X_8X
-#  include "asm/cache_coldfire_m547x_8x.h"
-# endif //CONFIG_M547X_8X
-
-#endif //CONFIG_COLDFIRE
-
 #endif //!__ARCH_M68K_CACHE_H
index ec683a3197daf14e8a01c60c6014cf975fe0e8f7..e5a28c67c45ef403fe0999965d1d1f6760672ec4 100644 (file)
@@ -5,11 +5,11 @@
 #else
 
 # ifdef CONFIG_COLDFIRE
-#  ifdef CONFIG_M547X_8X
-#   include "cacheflush_coldfire_m547x_8x.h"
-#  else        //CONFIG_M547X_8X
-#              error No Cache for this Coldfire.
-#  endif //CONFIG_M547X_8X
+#  ifdef CONFIG_M5445X
+#   include "mcf_5445x_cacheflush.h"
+#  else //CONFIG_M5445X
+#   include "mcf_cacheflush_m547x_8x.h"
+#  endif //CONFIG_M5445X
 # else //CONFIG_COLDFIRE
 #   include "cacheflush_mm.h"
 # endif //CONFIG_COLDFIRE
index 73b8c8fbed9cdbad94c505db43e98a3972d72d5d..2ffb1bea9283fdf010351a755a6cde7457d43aba 100644 (file)
@@ -7,6 +7,10 @@
 #include <asm/thread_info.h>
 #endif
 
+#ifdef CONFIG_COLDFIRE
+#include <asm/coldfire.h>
+#endif
+
 /*
  * Stack layout in 'ret_from_exception':
  *
@@ -26,6 +30,8 @@
  *     2C(sp) - sr
  *     2E(sp) - pc
  *     32(sp) - format & vector
+ *     36(sp) - MMUSR (Coldfire only)
+ *     3A(sp) - MMUAR (Coldfire only)
  */
 
 /*
@@ -65,6 +71,10 @@ LFLUSH_I_AND_D = 0x00000808
  * that the stack frame is NOT for syscall
  */
 .macro save_all_int
+#ifdef CONFIG_COLDFIRE
+       movel   MMUSR,%sp@-
+       movel   MMUAR,%sp@-
+#endif
        clrl    %sp@-           | stk_adj
        pea     -1:w            | orig d0
        movel   %d0,%sp@-       | d0
@@ -72,6 +82,10 @@ LFLUSH_I_AND_D = 0x00000808
 .endm
 
 .macro save_all_sys
+#ifdef CONFIG_COLDFIRE
+       movel   MMUSR,%sp@-
+       movel   MMUAR,%sp@-
+#endif
        clrl    %sp@-           | stk_adj
        movel   %d0,%sp@-       | orig d0
        movel   %d0,%sp@-       | d0
@@ -83,6 +97,9 @@ LFLUSH_I_AND_D = 0x00000808
        movel   %sp@+,%d0
        addql   #4,%sp          | orig d0
        addl    %sp@+,%sp       | stk adj
+#ifdef CONFIG_COLDFIRE
+       addql   #8,%sp          | MMUAR & MMUSR
+#endif
        rte
 .endm
 
diff --git a/arch/m68k/include/asm/mcf_5445x_cacheflush.h b/arch/m68k/include/asm/mcf_5445x_cacheflush.h
new file mode 100644 (file)
index 0000000..1e9836f
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * arch/m68k/include/asm/mcf_5445x_cacheflush.h - Coldfire 5445x Cache
+ *
+ * Based on arch/m68k/include/asm/cacheflush.h
+ *
+ * Coldfire pieces by:
+ *   Kurt Mahan kmahan@freescale.com
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef M68K_CF_5445x_CACHEFLUSH_H
+#define M68K_CF_5445x_CACHEFLUSH_H
+
+#include <asm/mcfcache.h>
+
+/*
+ * Coldfire Cache Model
+ *
+ * The Coldfire processors use a Harvard architecture cache configured
+ * as four-way set associative.  The cache does not implement bus snooping
+ * so cache coherency with other masters must be maintained in software.
+ *
+ * The cache is managed via the CPUSHL instruction in conjunction with
+ * bits set in the CACR (cache control register).  Currently the code
+ * uses the CPUSHL enhancement which adds the ability to
+ * invalidate/clear/push a cacheline by physical address.  This feature
+ * is designated in the Hardware Configuration Register [D1-CPES].
+ *
+ * CACR Bits:
+ *     DPI[28]         cpushl invalidate disable for d-cache
+ *     IDPI[12]        cpushl invalidate disable for i-cache
+ *     SPA[14]         cpushl search by physical address
+ *     IVO[20]         cpushl invalidate only
+ *
+ * Random Terminology:
+ *  * invalidate = reset the cache line's valid bit
+ *  * push = generate a line-sized store of the data if its contents are marked
+ *          as modified (the modified flag is cleared after the store)
+ *  * clear = push + invalidate
+ */
+
+/**
+ * flush_icache - Flush all of the instruction cache
+ */
+static inline void flush_icache(void)
+{
+       asm volatile("nop\n"
+                    "moveq%.l  #0,%%d0\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "1:\n"
+                    "cpushl    %%ic,(%%a0)\n"
+                    "add%.l    #0x0010,%%a0\n"
+                    "addq%.l   #1,%%d1\n"
+                    "cmpi%.l   %0,%%d1\n"
+                    "bne       1b\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "addq%.l   #1,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "cmpi%.l   #4,%%d0\n"
+                    "bne       1b\n"
+                    : : "i" (CACHE_SETS)
+                    : "a0", "d0", "d1");
+}
+
+/**
+ * flush_dcache - Flush all of the data cache
+ */
+static inline void flush_dcache(void)
+{
+       asm volatile("nop\n"
+                    "moveq%.l  #0,%%d0\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "1:\n"
+                    "cpushl    %%dc,(%%a0)\n"
+                    "add%.l    #0x0010,%%a0\n"
+                    "addq%.l   #1,%%d1\n"
+                    "cmpi%.l   %0,%%d1\n"
+                    "bne       1b\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "addq%.l   #1,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "cmpi%.l   #4,%%d0\n"
+                    "bne       1b\n"
+                    : : "i" (CACHE_SETS)
+                    : "a0", "d0", "d1");
+}
+
+/**
+ * flush_bcache - Flush all of both caches
+ */
+static inline void flush_bcache(void)
+{
+       asm volatile("nop\n"
+                    "moveq%.l  #0,%%d0\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "add%.l    #0x0010,%%a0\n"
+                    "addq%.l   #1,%%d1\n"
+                    "cmpi%.l   %0,%%d1\n"
+                    "bne       1b\n"
+                    "moveq%.l  #0,%%d1\n"
+                    "addq%.l   #1,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "cmpi%.l   #4,%%d0\n"
+                    "bne       1b\n"
+                    : : "i" (CACHE_SETS)
+                    : "a0", "d0", "d1");
+}
+
+/**
+ * cf_cache_clear - invalidate cache
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Invalidate cache lines starting at paddr for len bytes.
+ * Those lines are not pushed.
+ */
+static inline void cf_cache_clear(unsigned long paddr, int len)
+{
+       /* number of lines */
+       len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+       if (len == 0)
+               return;
+
+       /* align on set boundary */
+       paddr &= 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne%.b    1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (paddr), "r" (len),
+                        "r" (shadow_cacr),
+                        "i" (CF_CACR_SPA+CF_CACR_IVO)
+                    : "a0", "d0");
+}
+
+/**
+ * cf_cache_push - Push dirty cache out with no invalidate
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Push any dirty lines starting at paddr for len bytes.
+ * Those lines are not invalidated.
+ */
+static inline void cf_cache_push(unsigned long paddr, int len)
+{
+       /* number of lines */
+       len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+       if (len == 0)
+               return;
+
+       /* align on set boundary */
+       paddr &= 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (paddr), "r" (len),
+                        "r" (shadow_cacr),
+                        "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
+                    : "a0", "d0");
+}
+
+/**
+ * cf_cache_flush - Push dirty cache out and invalidate
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Push any dirty lines starting at paddr for len bytes and
+ * invalidate those lines.
+ */
+static inline void cf_cache_flush(unsigned long paddr, int len)
+{
+       /* number of lines */
+       len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+       if (len == 0)
+               return;
+
+       /* align on set boundary */
+       paddr &= 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (paddr), "r" (len),
+                        "r" (shadow_cacr),
+                        "i" (CF_CACR_SPA)
+                    : "a0", "d0");
+}
+
+/**
+ * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty data/instr lines in the given virtual range and
+ * invalidate those lines.
+ */
+static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
+{
+       int len;
+
+       /* align on set boundary */
+       vstart &= 0xfffffff0;
+       vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
+       len = vend - vstart;
+       if (len == 0)
+               return;
+       vstart = __pa(vstart);
+       vend = vstart + len;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%a1\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "cmpa%.l   %%a0,%%a1\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : /* no outputs */
+                    : "a" (vstart), "a" (vend),
+                      "r" (shadow_cacr),
+                      "i" (CF_CACR_SPA)
+                    : "a0", "a1", "d0");
+}
+
+/**
+ * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty data lines in the given virtual range and
+ * invalidate those lines.
+ */
+static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
+{
+       /* align on set boundary */
+       vstart &= 0xfffffff0;
+       vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%a1\n"
+                    "1:\n"
+                    "cpushl    %%dc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "cmpa%.l   %%a0,%%a1\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : /* no outputs */
+                    : "a" (__pa(vstart)), "a" (__pa(vend)),
+                      "r" (shadow_cacr),
+                      "i" (CF_CACR_SPA)
+                    : "a0", "a1", "d0");
+}
+
+/**
+ * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty instr lines in the given virtual range and
+ * invalidate those lines.  This should just be an invalidate, since
+ * the instruction cache should never hold dirty lines.
+ */
+static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
+{
+       /* align on set boundary */
+       vstart &= 0xfffffff0;
+       vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%a0\n"
+                    "move%.l   %1,%%a1\n"
+                    "1:\n"
+                    "cpushl    %%ic,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "cmpa%.l   %%a0,%%a1\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : /* no outputs */
+                    : "a" (__pa(vstart)), "a" (__pa(vend)),
+                      "r" (shadow_cacr),
+                      "i" (CF_CACR_SPA)
+                    : "a0", "a1", "d0");
+}
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+       if (mm == current->mm)
+               flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must avoid a dependency on linux/mm.h,
+ * which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
+{
+       if (vma->vm_mm == current->mm)
+               cf_cache_flush_range(start, end);
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr: virtual address
+ * @pfn: page number
+ *
+ * flush_cache_page must avoid a dependency on linux/mm.h,
+ * which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long vmaddr, unsigned long pfn)
+{
+       if (vma->vm_mm == current->mm)
+               cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+}
+
+/**
+ * __flush_page_to_ram - Push a page out of the cache
+ * @vaddr: Virtual address at start of page
+ *
+ * Push the page at kernel virtual address *vaddr* and clear
+ * the icache.
+ */
+static inline void __flush_page_to_ram(void *vaddr)
+{
+       asm volatile("nop\n"
+                    "move%.l   %2,%%d0\n"
+                    "or%.l     %3,%%d0\n"
+                    "movec     %%d0,%%cacr\n"
+                    "move%.l   %0,%%d0\n"
+                    "and%.l    #0xfffffff0,%%d0\n"
+                    "move%.l   %%d0,%%a0\n"
+                    "move%.l   %1,%%d0\n"
+                    "1:\n"
+                    "cpushl    %%bc,(%%a0)\n"
+                    "lea       0x10(%%a0),%%a0\n"
+                    "subq%.l   #1,%%d0\n"
+                    "bne.b     1b\n"
+                    "movec     %2,%%cacr\n"
+                    : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
+                        "r" (shadow_cacr), "i" (CF_CACR_SPA)
+                    : "a0", "d0");
+}
+
+/*
+ * Various defines for the kernel.
+ */
+
+extern void cache_clear(unsigned long paddr, int len);
+extern void cache_push(unsigned long paddr, int len);
+extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+
+#define flush_cache_all()                      flush_bcache()
+#define flush_cache_vmap(start, end)           flush_bcache()
+#define flush_cache_vunmap(start, end)         flush_bcache()
+
+#define flush_dcache_range(vstart, vend)       cf_dcache_flush_range(vstart, vend)
+#define flush_dcache_page(page)                        __flush_page_to_ram(page_address(page))
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+#define flush_icache_page(vma, page)           __flush_page_to_ram(page_address(page))
+
+/**
+ * copy_to_user_page - Copy memory to user page
+ */
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+       cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+}
+
+/**
+ * copy_from_user_page - Copy memory from user page
+ */
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+       memcpy(dst, src, len);
+}
+
+#endif /* M68K_CF_5445x_CACHEFLUSH_H */
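
All three whole-cache flushes above (flush_icache, flush_dcache, flush_bcache) walk the same geometry: the outer counter steps the way index 0..3, the inner counter steps the set index, and CPUSHL takes the set number in bits 4 and up with the way in the low bits. A rough C model of that loop, where cpushl_ic() is a hypothetical stand-in for the inline cpushl %ic instruction:

/* Hypothetical model of the flush_icache() loop above. */
static inline void cpushl_ic(unsigned long addr)
{
	asm volatile("cpushl %%ic,(%0)" : : "a" (addr));
}

static inline void flush_icache_model(void)
{
	unsigned long way, set;

	for (way = 0; way < 4; way++)		/* four-way set associative */
		for (set = 0; set < CACHE_SETS; set++)
			/* set index in bits [n:4], way index in the low bits */
			cpushl_ic((set << 4) | way);
}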
diff --git a/arch/m68k/include/asm/mcf_cacheflush_m547x_8x.h b/arch/m68k/include/asm/mcf_cacheflush_m547x_8x.h
new file mode 100644 (file)
index 0000000..7898c82
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * arch/m68k/include/asm/mcf_cacheflush_m547x_8x.h - Coldfire 547x/548x Cache
+ *
+ * Based on arch/m68k/include/asm/cacheflush.h
+ *
+ * Coldfire pieces by:
+ *   Kurt Mahan kmahan@freescale.com
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _M68K_MCF_M547X_8X_CACHEFLUSH_H
+#define _M68K_MCF_M547X_8X_CACHEFLUSH_H
+
+/*
+ * Cache handling functions
+ */
+
+#define flush_icache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
+    asm volatile("cpushl %%ic,(%0)\n"                          \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%ic,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%ic,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));               \
+  }                                                            \
+})
+
+#define flush_dcache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
+    asm volatile("cpushl %%dc,(%0)\n"                          \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%dc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%dc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set));               \
+  }                                                            \
+})
+
+#define flush_bcache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {   \
+    asm volatile("cpushl %%bc,(%0)\n"                          \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%bc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%bc,(%0)\n"                                \
+                 "\taddq%.l #1,%0\n"                           \
+                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));               \
+  }                                                            \
+})
+
+/*
+ * invalidate the cache for the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_clear(unsigned long paddr, int len);
+/*
+ * push any dirty cache in the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_push(unsigned long paddr, int len);
+
+/*
+ * push and invalidate pages in the specified user virtual
+ * memory range.
+ */
+extern void cache_push_v(unsigned long vaddr, int len);
+
+/* This is needed whenever the virtual mapping of the current
+   process changes.  */
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+       if (mm == current->mm)
+               flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_all()              flush_bcache()
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must avoid a dependency on linux/mm.h,
+ * which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
+{
+       if (vma->vm_mm == current->mm)
+               flush_bcache();
+//             cf_cache_flush_range(start, end);
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr: virtual address
+ * @pfn: page number
+ *
+ * flush_cache_page must avoid a dependency on linux/mm.h,
+ * which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long vmaddr, unsigned long pfn)
+{
+       if (vma->vm_mm == current->mm)
+               flush_bcache();
+//             cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+}
+
+/* Push the page at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
+extern inline void __flush_page_to_ram(void *address)
+{
+  unsigned long set;
+  unsigned long start_set;
+  unsigned long end_set;
+  unsigned long addr = (unsigned long) address;
+
+  addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
+
+  start_set = addr & _ICACHE_SET_MASK;
+  end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
+
+  if (start_set > end_set) {
+    /* from the beginning to the lowest address */
+    for (set = 0; set <= end_set; set += (0x10 - 3)) {
+      asm volatile("cpushl %%bc,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%bc,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%bc,(%0)\n"
+                   "\taddq%.l #1,%0\n"
+                   "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+    }
+    /* next loop will finish the cache, i.e. pass the hole */
+    end_set = LAST_ICACHE_ADDR;
+  }
+  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+    asm volatile("cpushl %%bc,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%bc,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%bc,(%0)\n"
+                 "\taddq%.l #1,%0\n"
+                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+  }
+}
+
+/* Reuse __flush_page_to_ram() for flush_dcache_page(); the flushed
+ * range and the operation are identical - MW */
+#define flush_dcache_page(page)                        \
+       __flush_page_to_ram((void *) page_address(page))
+#define flush_icache_page(vma,pg)              \
+       __flush_page_to_ram((void *) page_address(pg))
+#define flush_icache_user_range(adr,len)       do { } while (0)
+/* NL */
+#define flush_icache_user_page(vma,page,addr,len)      do { } while (0)
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpushl %ic instead of cpush %dc, cinv %ic */
+static inline void flush_icache_range(unsigned long address,
+                                      unsigned long endaddr)
+{
+       unsigned long set;
+       unsigned long start_set;
+       unsigned long end_set;
+
+       start_set = address & _ICACHE_SET_MASK;
+       end_set = endaddr & _ICACHE_SET_MASK;
+
+       if (start_set > end_set) {
+               /* from the beginning to the lowest address */
+               for (set = 0; set <= end_set; set += (0x10 - 3)) {
+                       asm volatile("cpushl %%ic,(%0)\n"
+                                    "\taddq%.l #1,%0\n"
+                                    "\tcpushl %%ic,(%0)\n"
+                                    "\taddq%.l #1,%0\n"
+                                    "\tcpushl %%ic,(%0)\n"
+                                    "\taddq%.l #1,%0\n"
+                                    "\tcpushl %%ic,(%0)"
+                                    : "=a" (set) : "a" (set));
+               }
+               /* next loop will finish the cache, i.e. pass the hole */
+               end_set = LAST_ICACHE_ADDR;
+       }
+       for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+               asm volatile("cpushl %%ic,(%0)\n"
+                            "\taddq%.l #1,%0\n"
+                            "\tcpushl %%ic,(%0)\n"
+                            "\taddq%.l #1,%0\n"
+                            "\tcpushl %%ic,(%0)\n"
+                            "\taddq%.l #1,%0\n"
+                            "\tcpushl %%ic,(%0)"
+                            : "=a" (set) : "a" (set));
+       }
+}
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+       flush_icache_user_page(vma, page, vaddr, len);
+}
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+}
+
+#define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vunmap(start, end)         flush_cache_all()
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+#endif /* _M68K_CACHEFLUSH_CODLFIRE_M547X_8X_H */
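The wrap-around ("hole") handling in the two flush loops above is easiest to see with concrete numbers. The following stand-alone C model is a sketch, not kernel code: the geometry macros are copied from the M547x/8x section of mcfcache.h below, each printf stands in for the four cpushl instructions that flush all ways of one set, and the kernel's (0x10 - 3) stride plus the three addq increments inside the asm is folded into a plain 0x10 step.

#include <stdio.h>

/* M547x/M548x I-cache geometry, copied from mcfcache.h below */
#define ICACHE_SIZE      0x8000
#define CACHE_WAYS       0x0004
#define _ICACHE_SET_MASK ((ICACHE_SIZE/64 - 1) << CACHE_WAYS) /* 0x1ff0 */
#define LAST_ICACHE_ADDR _ICACHE_SET_MASK

/* Model of the flush_icache_range() loop structure only */
static void model_flush(unsigned long address, unsigned long endaddr)
{
        unsigned long set;
        unsigned long start_set = address & _ICACHE_SET_MASK;
        unsigned long end_set = endaddr & _ICACHE_SET_MASK;

        if (start_set > end_set) {
                /* masked range wraps: cover sets 0..end_set first */
                for (set = 0; set <= end_set; set += 0x10)
                        printf("flush set 0x%04lx, ways 0-3\n", set);
                /* then let the main loop run up to the last set */
                end_set = LAST_ICACHE_ADDR;
        }
        for (set = start_set; set <= end_set; set += 0x10)
                printf("flush set 0x%04lx, ways 0-3\n", set);
}

int main(void)
{
        /* 0x2020 masks to 0x0020, below 0x1fe0: the range wraps */
        model_flush(0x1fe0, 0x2020);
        return 0;
}

Running it prints sets 0x0000..0x0020 first, then 0x1fe0..0x1ff0, i.e. the whole masked range is covered despite the wrap.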
index f49dfc09f70a6949375cd9c2d628373762fc4b49..6ffd51937f2b64a8ecea3d92bb5d6a45e2489729 100644 (file)
@@ -7,11 +7,10 @@
  */
 
 /****************************************************************************/
-#ifndef        __M68KNOMMU_MCFCACHE_H
-#define        __M68KNOMMU_MCFCACHE_H
+#ifndef        __M68K_MCFCACHE_H
+#define        __M68K_MCFCACHE_H
 /****************************************************************************/
 
-
 /*
 *     The different ColdFire families have different cache arrangements.
  *     Everything from a small instruction only cache, to configurable
  *     harvard style separate instruction and data caches.
  */
 
+/* ========================================================================= */
+/* NoMMU Coldfire                                                            */
+/* ========================================================================= */
+
 #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || defined(CONFIG_M5272)
 /*
  *     Simple version 2 core cache. These have instruction cache only,
 .endm
 #endif /* CONFIG_M520x */
 
+/* ========================================================================= */
+/* MMU Coldfire                                                              */
+/* ========================================================================= */
+
+#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
+
+/*
+ * CACR Cache Control Register
+ */
+#define CF_CACR_DEC         (0x80000000) /* Data Cache Enable                */
+#define CF_CACR_DW          (0x40000000) /* Data default Write-protect       */
+#define CF_CACR_DESB        (0x20000000) /* Data Enable Store Buffer         */
+#define CF_CACR_DPI         (0x10000000) /* Data Disable CPUSHL Invalidate   */
+#define CF_CACR_DHLCK       (0x08000000) /* 1/2 Data Cache Lock Mode         */
+#define CF_CACR_DDCM_00     (0x00000000) /* Cacheable writethrough imprecise */
+#define CF_CACR_DDCM_01     (0x02000000) /* Cacheable copyback               */
+#define CF_CACR_DDCM_10     (0x04000000) /* Noncacheable precise             */
+#define CF_CACR_DDCM_11     (0x06000000) /* Noncacheable imprecise           */
+#define CF_CACR_DCINVA      (0x01000000) /* Data Cache Invalidate All        */
+#define CF_CACR_DDSP        (0x00800000) /* Data default supervisor-protect  */
+#define CF_CACR_IVO         (0x00100000) /* Invalidate only                  */
+#define CF_CACR_BEC         (0x00080000) /* Branch Cache Enable              */
+#define CF_CACR_BCINVA      (0x00040000) /* Branch Cache Invalidate All      */
+#define CF_CACR_IEC         (0x00008000) /* Instruction Cache Enable         */
+#define CF_CACR_SPA         (0x00004000) /* Search by Physical Address       */
+#define CF_CACR_DNFB        (0x00002000) /* Default cache-inhibited fill buf */
+#define CF_CACR_IDPI        (0x00001000) /* Instr Disable CPUSHL Invalidate  */
+#define CF_CACR_IHLCK       (0x00000800) /* 1/2 Instruction Cache Lock Mode  */
+#define CF_CACR_IDCM        (0x00000400) /* Noncacheable Instr default mode  */
+#define CF_CACR_ICINVA      (0x00000100) /* Instr Cache Invalidate All       */
+#define CF_CACR_IDSP        (0x00000080) /* Instr default supervisor-protect */
+#define CF_CACR_EUSP        (0x00000020) /* Switch stacks in user mode       */
+
+#ifdef CONFIG_M5445X
+/*
+ * M5445x Cache Configuration
+ *     - cache line size is 16 bytes
+ *     - cache is 4-way set associative
+ *     - each cache has 256 sets (16k / 16bytes / 4way)
+ *     - I-Cache size is 16KB
+ *     - D-Cache size is 16KB
+ */
+#define ICACHE_SIZE 0x4000             /* instruction - 16k */
+#define DCACHE_SIZE 0x4000             /* data - 16k */
+
+#define CACHE_LINE_SIZE 0x0010         /* 16 bytes */
+#define CACHE_SETS 0x0100              /* 256 sets */
+#define CACHE_WAYS 0x0004              /* 4 way */
+
+#define CACHE_DISABLE_MODE     (CF_CACR_DCINVA |       \
+                                CF_CACR_BCINVA |       \
+                                CF_CACR_ICINVA)
+
+#ifndef CONFIG_M5445X_DISABLE_CACHE
+#define CACHE_INITIAL_MODE     (CF_CACR_DEC |          \
+                                CF_CACR_BEC |          \
+                                CF_CACR_IEC |          \
+                                CF_CACR_DESB |         \
+                                CF_CACR_EUSP)
+#else
+/* cache disabled for testing */
+#define CACHE_INITIAL_MODE     (CF_CACR_EUSP)
+#endif /* CONFIG_M5445X_DISABLE_CACHE */
+
+#elif defined(CONFIG_M547X_8X)
+/*
+ * M547x/M548x Cache Configuration
+ *     - cache line size is 16 bytes
+ *     - cache is 4-way set associative
+ *     - each cache has 512 sets (32k / 16bytes / 4way)
+ *     - I-Cache size is 32KB
+ *     - D-Cache size is 32KB
+ */
+#define ICACHE_SIZE 0x8000             /* instruction - 32k */
+#define DCACHE_SIZE 0x8000             /* data - 32k */
+
+#define CACHE_LINE_SIZE 0x0010         /* 16 bytes */
+#define CACHE_SETS 0x0200              /* 512 sets */
+#define CACHE_WAYS 0x0004              /* 4 way */
+
+/* set masks kept for the old cpushl-based caching code;
+ * 64 = CACHE_LINE_SIZE * CACHE_WAYS */
+#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
+#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
+#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
+#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
+
+#define CACHE_DISABLE_MODE     (CF_CACR_DCINVA |       \
+                                CF_CACR_BCINVA |       \
+                                CF_CACR_ICINVA)
+
+#define CACHE_INITIAL_MODE     (CF_CACR_DEC |          \
+                                CF_CACR_BEC |          \
+                                CF_CACR_IEC |          \
+                                CF_CACR_DESB |         \
+                                CF_CACR_EUSP)
+#endif /* CONFIG_M547X_8X */
+
+#ifndef __ASSEMBLY__
+
+extern unsigned long shadow_cacr;
+extern void cacr_set(unsigned long x);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_M5445X || CONFIG_M547X_8X */
+
 /****************************************************************************/
-#endif /* __M68KNOMMU_MCFCACHE_H */
+#endif /* __M68K_MCFCACHE_H */
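The geometry comments in both branches above follow sets = size / (line size * ways). As a quick compile-time sanity check, the classic negative-array-size trick can verify the arithmetic; BUILD_BUG_ON would be the in-kernel equivalent, but this sketch needs no headers:

/* a negative array size here would make the unit fail to compile */
typedef char m5445x_sets_ok[(0x4000 / (0x10 * 4) == 0x0100) ? 1 : -1]; /* 256 */
typedef char m547x_sets_ok [(0x8000 / (0x10 * 4) == 0x0200) ? 1 : -1]; /* 512 */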
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
new file mode 100644 (file)
index 0000000..bc6e937
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Definitions for Coldfire V4e MMU
+ */
+#ifndef __M68K_MCFMMU_H
+#define __M68K_MCFMMU_H
+
+#include <asm/movs.h>
+
+#define MMU_BASE 0xE1000000
+
+#define MMUCR (MMU_BASE+0x00)
+#define MMUCR_ASMN  1
+#define MMUCR_ASM   (1<<MMUCR_ASMN)
+#define MMUCR_ENN   0
+#define MMUCR_EN    (1<<MMUCR_ENN)
+
+#define MMUOR REG16(MMU_BASE+0x04+0x02)
+#define MMUOR_AAN   16
+#define MMUOR_AA    (0xffff<<MMUOR_AAN)
+#define MMUOR_STLBN 8
+#define MMUOR_STLB  (1<<MMUOR_STLBN)
+#define MMUOR_CAN   7
+#define MMUOR_CA    (1<<MMUOR_CAN)
+#define MMUOR_CNLN  6
+#define MMUOR_CNL   (1<<MMUOR_CNLN)
+#define MMUOR_CASN  5
+#define MMUOR_CAS   (1<<MMUOR_CASN)
+#define MMUOR_ITLBN 4
+#define MMUOR_ITLB  (1<<MMUOR_ITLBN)
+#define MMUOR_ADRN  3
+#define MMUOR_ADR   (1<<MMUOR_ADRN)
+#define MMUOR_RWN   2
+#define MMUOR_RW    (1<<MMUOR_RWN)
+#define MMUOR_ACCN  1
+#define MMUOR_ACC   (1<<MMUOR_ACCN)
+#define MMUOR_UAAN  0
+#define MMUOR_UAA   (1<<MMUOR_UAAN)
+
+#define MMUSR REG32(MMU_BASE+0x08)
+#define MMUSR_SPFN  5
+#define MMUSR_SPF   (1<<MMUSR_SPFN)
+#define MMUSR_RFN   4
+#define MMUSR_RF    (1<<MMUSR_RFN)
+#define MMUSR_WFN   3
+#define MMUSR_WF    (1<<MMUSR_WFN)
+#define MMUSR_HITN  1
+#define MMUSR_HIT   (1<<MMUSR_HITN)
+
+#define MMUAR REG32(MMU_BASE+0x10)
+#define MMUAR_VPN   1
+#define MMUAR_VP    (0xfffffffe)
+#define MMUAR_SN    0
+#define MMUAR_S     (1<<MMUAR_SN)
+
+#define MMUTR REG32(MMU_BASE+0x14)
+#define MMUTR_VAN   10
+#define MMUTR_VA    (0xfffffc00)
+#define MMUTR_IDN   2
+#define MMUTR_ID    (0xff<<MMUTR_IDN)
+#define MMUTR_SGN   1
+#define MMUTR_SG    (1<<MMUTR_SGN)
+#define MMUTR_VN    0
+#define MMUTR_V     (1<<MMUTR_VN)
+
+#define MMUDR REG32(MMU_BASE+0x18)
+#define MMUDR_PAN   10
+#define MMUDR_PA    (0xfffffc00)
+#define MMUDR_SZN   8
+#define MMUDR_SZ_MASK (0x3<<MMUDR_SZN) /* 2-bit size field */
+#define MMUDR_SZ1M  (0<<MMUDR_SZN)
+#define MMUDR_SZ4K  (1<<MMUDR_SZN)
+#define MMUDR_SZ8K  (2<<MMUDR_SZN)
+#define MMUDR_SZ16M (3<<MMUDR_SZN)
+#define MMUDR_CMN   6
+/* CM field, instruction access encodings */
+#define MMUDR_INC   (2<<MMUDR_CMN) /* instruction noncacheable */
+#define MMUDR_IC    (0<<MMUDR_CMN) /* instruction cacheable */
+/* CM field, data access encodings */
+#define MMUDR_DWT   (0<<MMUDR_CMN) /* data writethrough */
+#define MMUDR_DCB   (1<<MMUDR_CMN) /* data copyback */
+#define MMUDR_DNCP  (2<<MMUDR_CMN) /* data noncacheable precise */
+#define MMUDR_DNCIP (3<<MMUDR_CMN) /* data noncacheable imprecise */
+#define MMUDR_SPN   5
+#define MMUDR_SP    (1<<MMUDR_SPN)
+#define MMUDR_RN    4
+#define MMUDR_R     (1<<MMUDR_RN)
+#define MMUDR_WN    3
+#define MMUDR_W     (1<<MMUDR_WN)
+#define MMUDR_XN    2
+#define MMUDR_X     (1<<MMUDR_XN)
+#define MMUDR_LKN   1
+#define MMUDR_LK    (1<<MMUDR_LKN)
+
+
+#ifndef __ASSEMBLY__
+#define CF_PMEGS_NUM           256
+#define CF_INVALID_CONTEXT     255
+#define CF_PAGE_PGNUM_MASK     (PAGE_MASK)
+
+extern int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb,
+                      int extension_word);
+#endif /* __ASSEMBLY__*/
+
+#endif /* !__M68K_MCFMMU_H */
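As a usage illustration for the field macros above, here is a hedged sketch of composing one 4 KiB data-TLB entry. The virtual/physical addresses and ASID are invented, the snippet assumes <asm/mcfmmu.h> is included, and the final three stores assume that REG16()/REG32() from asm/movs.h expand to volatile lvalues and that the V4e TLB-load sequence is tag, data, then an operation-register write with ACC|UAA; neither assumption is established by this header.

/* sketch: hypothetical mapping virtual 0x40000000 -> physical
 * 0x08000000, ASID 3, valid + writable + copy-back */
static void map_one_page(void)
{
        unsigned long tr, dr;

        tr = (0x40000000 & MMUTR_VA)   /* virtual address tag, bits 31..10 */
           | (3 << MMUTR_IDN)          /* address space (process) ID */
           | MMUTR_V;                  /* entry valid */

        dr = (0x08000000 & MMUDR_PA)   /* physical address, bits 31..10 */
           | MMUDR_SZ4K                /* 4 KiB page */
           | MMUDR_DCB                 /* data cache copy-back mode */
           | MMUDR_R | MMUDR_W;        /* readable and writable */

        MMUTR = tr;                    /* tag register */
        MMUDR = dr;                    /* data register */
        /* assumed trigger: access + update allocation address;
         * MMUOR is 16-bit, both flags sit in the low half */
        MMUOR = MMUOR_ACC | MMUOR_UAA;
}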