.globl auto_irqhandler_fixup
.globl user_irqvec_fixup, user_irqhandler_fixup
+#ifdef CONFIG_COLDFIRE
+.global sw_usp, sw_ksp
+#endif
+
.text
+| buserr: bus-error exception entry — save the full exception frame,
+| then leave through the common exception-return path.
ENTRY(buserr)
SAVE_ALL_INT
jra .Lret_from_exception
+| do_trace_entry: system-call entry path when the task is being traced.
+| The saved %d0 slot is pre-loaded with -ENOSYS so the tracer observes a
+| sane return value if the syscall number is (or becomes) invalid.
do_trace_entry:
+#ifdef CONFIG_COLDFIRE
+| ColdFire cannot encode a long move-immediate to this memory addressing
+| mode, so the constant is staged through %d1.
+ movel #-ENOSYS,%d1 /* needed for strace */
+ movel %d1,%sp@(PT_OFF_D0)
+#else
movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
+#endif
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
cmpl #NR_syscalls,%d0
jcs syscall
+| badsys: invalid syscall number — leave -ENOSYS in the saved %d0.
badsys:
+#ifdef CONFIG_COLDFIRE
+ movel #-ENOSYS,%d1
+ movel %d1,%sp@(PT_OFF_D0)
+#else
movel #-ENOSYS,%sp@(PT_OFF_D0)
+#endif
jra ret_from_syscall
+| do_trace_exit: re-check the (possibly tracer-modified) syscall number.
do_trace_exit:
cmpl #NR_syscalls,%d0
jcc badsys
+| syscall: dispatch through sys_call_table[%d0].
syscall:
+#ifdef CONFIG_COLDFIRE
+| ColdFire lacks the memory-indirect pre-indexed addressing mode used in
+| the #else branch, so the table entry is fetched explicitly:
+| %a0 = *(sys_call_table + %d0*4), then call through %a0.
+ movel #sys_call_table,%a0
+ asll #2,%d0
+ addl %d0,%a0
+ movel %a0@,%a0
+ jsr %a0@
+ movel %d0,%sp@(PT_OFF_D0) | save the return value
+#else
jbsr @(sys_call_table,%d0:l:4)@(0)
movel %d0,%sp@(PT_OFF_D0) | save the return value
+#endif
ret_from_syscall:
|oriw #0x0700,%sr
movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
syscall_exit_work:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
+#ifdef CONFIG_COLDFIRE
+| ColdFire shifts are 32-bit only, so instead of the word shift-and-test
+| idiom in the #else branch the work bits are tested in place: bits 15,
+| 14 and 6 of the flag word — the same bits the #else sequence tests.
+ btstl #15,%d0
+ jne do_trace_exit
+ btstl #14,%d0
+ jne do_delayed_trace
+ btstl #6,%d0
+ jne do_signal_return
+#else
lslw #1,%d0
jcs do_trace_exit
jmi do_delayed_trace
lslw #8,%d0
jmi do_signal_return
+#endif
pea resume_userspace
jra schedule
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
| heavy interrupt load
+
+#ifdef CONFIG_COLDFIRE
+| ColdFire cannot and-immediate directly into %sr; lower the interrupt
+| mask via %d0, preserving the caller's %d0 across the update.
+ movel %d0,%sp@-
+ move %sr,%d0
+ andl #ALLOWINT,%d0
+ move %d0,%sr
+ movel %sp@+,%d0
+#else
andw #ALLOWINT,%sr
+#endif
resume_userspace:
moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+#ifdef CONFIG_COLDFIRE
+| test bit 6 of the low flag byte directly — the same bit the #else
+| branch tests via lslb/jmi (ColdFire has no byte-size shifts).
+ btstl #6,%d0
+ jne do_signal_return
+#else
lslb #1,%d0
jmi do_signal_return
+#endif
pea resume_userspace
jra schedule
ENTRY(auto_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
+#ifdef CONFIG_COLDFIRE
+| Bump the preempt count with a 32-bit add, then pull the vector number
+| out of bits 25:18 of the ColdFire format/vector longword:
+| swap brings them to bits 9:2, lsrl #2 to 7:0, andl masks the rest.
+ addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ | put exception # in d0
+ movel %sp@(PT_OFF_FORMATVEC),%d0
+ swap %d0 | extract bits 25:18
+ lsrl #2,%d0
+ andl #0x0ff,%d0
+#else
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0
+#endif
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
addql #8,%sp | pop parameters off stack
ret_from_interrupt:
+#ifdef CONFIG_COLDFIRE
+ subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
+#else
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+#endif
jeq ret_from_last_interrupt
2: RESTORE_ALL
ALIGN
ret_from_last_interrupt:
+#ifdef CONFIG_COLDFIRE
+| moveb writes only the low byte of %d0, but the andl immediate is
+| <= 0xff, so the stale upper 24 bits are cleared before the jne test.
+ moveb %sp@(PT_OFF_SR),%d0
+ andl #(~ALLOWINT>>8)&0xff,%d0
+#else
moveq #(~ALLOWINT>>8)&0xff,%d0
andb %sp@(PT_OFF_SR),%d0
+#endif
jne 2b
/* check if we need to do software interrupts */
/* check if we need to do software interrupts */
ENTRY(user_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
+#ifdef CONFIG_COLDFIRE
+ addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ | put exception # in d0
+ movel %sp@(PT_OFF_FORMATVEC),%d0
+user_irqvec_fixup = . + 2
+ swap %d0 | extract bits 25:18
+ lsrl #2,%d0
+ andl #0x0ff,%d0
+#else
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
- subw #VEC_USER,%d0
+ subw #VEC_SPUR,%d0
+#endif
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
jsr __m68k_handle_int | process the IRQ
addql #8,%sp | pop parameters off stack
+#ifdef CONFIG_COLDFIRE
+ subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
+#else
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+#endif
jeq ret_from_last_interrupt
RESTORE_ALL
+| bad_inthandler: entry for unexpected/spurious interrupts.
+| handle_badint() is called with only the saved-frame pointer pushed;
+| it takes no vector-number argument.
ENTRY(bad_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
+#ifdef CONFIG_COLDFIRE
+ addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
+#else
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+#endif
+| (dead vector-number extraction into %d0 removed: nothing between here
+| and RESTORE_ALL reads %d0, and only %sp is passed to handle_badint)
movel %sp,%sp@-
jsr handle_badint
addql #4,%sp
+#ifdef CONFIG_COLDFIRE
+ subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
+#else
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+#endif
jeq ret_from_last_interrupt
RESTORE_ALL
* registers until their contents are no longer needed.
*/
+#ifdef CONFIG_COLDFIRE
+ /* save sr — ColdFire can move %sr only via a data register */
+ movew %sr,%d0
+ movew %d0,%a0@(TASK_THREAD+THREAD_SR)
+
+ /* On CF use %a1 to save usp */
+ movel %a1,%d0 | free up %a1: its value is still live, park it in %d0
+
+ /* save usp — ColdFire moves %usp only to/from an address register */
+ movel %usp,%a1
+ movel %a1,%a0@(TASK_THREAD+THREAD_USP)
+
+ movel %d0,%a1 | put the parked value back in %a1
+#else
/* save sr */
movew %sr,%a0@(TASK_THREAD+THREAD_SR)
/* it is better to use a movel here instead of a movew 8*) */
movec %usp,%d0
movel %d0,%a0@(TASK_THREAD+THREAD_USP)
+#endif
/* save non-scratch registers on stack */
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
/* save floating point context */
+| ColdFire is now also excluded from the 68k FPU save/restore path.
-#ifndef CONFIG_M68KFPU_EMU_ONLY
+#if !defined(CONFIG_M68KFPU_EMU_ONLY) && !defined(CONFIG_COLDFIRE)
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype
jeq 3f
movel %a1,%curptr
/* restore floating point context */
-#ifndef CONFIG_M68KFPU_EMU_ONLY
+#if !defined(CONFIG_M68KFPU_EMU_ONLY) && !defined(CONFIG_COLDFIRE)
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype
jeq 4f
movel %a1@(TASK_THREAD+THREAD_USP),%a0
movel %a0,%usp
+#ifdef CONFIG_COLDFIRE
+ /* restore status register — staged through %d0 since ColdFire cannot
+    load %sr straight from memory; the %sfc restore done in the #else
+    branch has no ColdFire counterpart here */
+ movew %a1@(TASK_THREAD+THREAD_SR),%d0
+ movew %d0,%sr
+#else
/* restore fs (sfc,%dfc) */
movew %a1@(TASK_THREAD+THREAD_FS),%a0
movec %a0,%sfc
/* restore status register */
movew %a1@(TASK_THREAD+THREAD_SR),%sr
+#endif
rts
.data
ALIGN
+
+#ifdef CONFIG_COLDFIRE
+| Software-maintained stack pointers used by the ColdFire entry code
+| (exported via .global at the top of the file): sw_ksp = kernel stack
+| pointer, sw_usp = user stack pointer.  NOTE(review): presumably needed
+| because the targeted CF cores lack hardware USP switching — confirm
+| against the entry macros that read/write these.
+sw_ksp:
+ .long 0
+
+sw_usp:
+ .long 0
+#endif
+
+
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit