2 * Low-level system-call handling, trap handlers and context-switching
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
29 #include <asm/unistd.h>
31 #include <linux/errno.h>
32 #include <asm/signal.h>
/* The size of a state save frame: the pt_regs area plus the argument
 * space reserved below it. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Emit a global, 4-byte-aligned entry-point label callable from C. */
#define C_ENTRY(name) .globl name; .align 4; name
43 * Various ways of setting and clearing BIP in flags reg.
44 * This is mucky, but necessary using microblaze version that
45 * allows msr ops to write to BIP
47 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
102 andi r11, r11, ~MSR_BIP
110 ori r11, r11, MSR_BIP
118 andi r11, r11, ~MSR_EIP
134 andi r11, r11, ~MSR_IE
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
168 andni r11, r11, (MSR_VMS|MSR_UMS)
174 /* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 /* turn on virtual protected mode save */
185 /* turn off virtual protected mode save and user mode save*/
188 rted r0, TOPHYS(1f); \
        swi     r2, r1, PTO+PT_R2;      /* Save SDA */                  \
        swi     r5, r1, PTO+PT_R5;      /* save syscall args r5-r10 */  \
        swi     r6, r1, PTO+PT_R6;                                      \
        swi     r7, r1, PTO+PT_R7;                                      \
        swi     r8, r1, PTO+PT_R8;                                      \
        swi     r9, r1, PTO+PT_R9;                                      \
        swi     r10, r1, PTO+PT_R10;                                    \
        swi     r11, r1, PTO+PT_R11;    /* save clobbered regs after rval */\
        swi     r12, r1, PTO+PT_R12;                                    \
        swi     r13, r1, PTO+PT_R13;    /* Save SDA2 */                 \
        swi     r14, r1, PTO+PT_PC;     /* PC, before IRQ/trap */       \
        swi     r15, r1, PTO+PT_R15;    /* Save LP */                   \
        swi     r18, r1, PTO+PT_R18;    /* Save asm scratch reg */      \
        swi     r19, r1, PTO+PT_R19;    /* callee-saved r19-r30 */      \
        swi     r20, r1, PTO+PT_R20;                                    \
        swi     r21, r1, PTO+PT_R21;                                    \
        swi     r22, r1, PTO+PT_R22;                                    \
        swi     r23, r1, PTO+PT_R23;                                    \
        swi     r24, r1, PTO+PT_R24;                                    \
        swi     r25, r1, PTO+PT_R25;                                    \
        swi     r26, r1, PTO+PT_R26;                                    \
        swi     r27, r1, PTO+PT_R27;                                    \
        swi     r28, r1, PTO+PT_R28;                                    \
        swi     r29, r1, PTO+PT_R29;                                    \
        swi     r30, r1, PTO+PT_R30;                                    \
        swi     r31, r1, PTO+PT_R31;    /* Save current task reg */     \
        mfs     r11, rmsr;              /* save MSR */                  \
        swi     r11, r1, PTO+PT_MSR;
/* Restore the GP register file from the pt_regs area at r1+PTO.
 * Counterpart of SAVE_REGS; the stack pointer itself is restored
 * separately by the individual return paths. */
#define RESTORE_REGS \
        lwi     r11, r1, PTO+PT_MSR;    /* reload saved MSR */          \
        lwi     r2, r1, PTO+PT_R2;      /* restore SDA */               \
        lwi     r5, r1, PTO+PT_R5;      /* restore syscall args r5-r10 */\
        lwi     r6, r1, PTO+PT_R6;                                      \
        lwi     r7, r1, PTO+PT_R7;                                      \
        lwi     r8, r1, PTO+PT_R8;                                      \
        lwi     r9, r1, PTO+PT_R9;                                      \
        lwi     r10, r1, PTO+PT_R10;                                    \
        lwi     r11, r1, PTO+PT_R11;    /* restore clobbered regs after rval */\
        lwi     r12, r1, PTO+PT_R12;                                    \
        lwi     r13, r1, PTO+PT_R13;    /* restore SDA2 */              \
        lwi     r14, r1, PTO+PT_PC;     /* RESTORE_LINK PC, before IRQ/trap */\
        lwi     r15, r1, PTO+PT_R15;    /* restore LP */                \
        lwi     r18, r1, PTO+PT_R18;    /* restore asm scratch reg */   \
        lwi     r19, r1, PTO+PT_R19;    /* callee-saved r19-r30 */      \
        lwi     r20, r1, PTO+PT_R20;                                    \
        lwi     r21, r1, PTO+PT_R21;                                    \
        lwi     r22, r1, PTO+PT_R22;                                    \
        lwi     r23, r1, PTO+PT_R23;                                    \
        lwi     r24, r1, PTO+PT_R24;                                    \
        lwi     r25, r1, PTO+PT_R25;                                    \
        lwi     r26, r1, PTO+PT_R26;                                    \
        lwi     r27, r1, PTO+PT_R27;                                    \
        lwi     r28, r1, PTO+PT_R28;                                    \
        lwi     r29, r1, PTO+PT_R29;                                    \
        lwi     r30, r1, PTO+PT_R30;                                    \
        lwi     r31, r1, PTO+PT_R31;    /* Restore cur task reg */
258 * System calls are handled here.
261 * Syscall number in r12, args in r5-r10
264 * Trap entered via brki instruction, so BIP bit is set, and interrupts
265 * are masked. This is nice, means we don't have to CLI before state save
C_ENTRY(_user_exception):
        /* Syscall trap entry: entered via brki, so BIP is set and
         * interrupts are masked.  r12 = syscall number, r5-r10 = args. */
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
        addi    r14, r14, 4     /* return address is 4 byte after call */
        swi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
        lwi     r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
        beqi    r11, 1f;        /* Jump ahead if coming from user */
        /* Kernel-mode state save. */
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
        swi     r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
        lwi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
        addik   r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
        addi    r11, r0, 1;     /* Was in kernel-mode. */
        swi     r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
        nop;                    /* Fill delay slot */
        /* User-mode state save. */
        lwi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
        lwi     r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
        lwi     r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
        /* calculate kernel stack pointer from task struct 8k */
        addik   r1, r1, THREAD_SIZE;
        addik   r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
        swi     r0, r1, PTO+PT_MODE;    /* Was in user-mode. */
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
        swi     r11, r1, PTO+PT_R1;     /* Store user SP. */
        swi     r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2:      lwi     r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
        /* Save away the syscall number. */
        swi     r12, r1, PTO+PT_R0;
        /* where the trap should return need -8 to adjust for rtsd r15, 8*/
        /* Jump to the appropriate function for the system call number in r12
         * (r12 is not preserved), or return an error if r12 is not valid.  The LP
         * register should point to the location where
         * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
        # Step into virtual mode.
        /* Syscall tracing: if any _TIF_WORK_SYSCALL_MASK flag is set,
         * hand the saved register frame to do_syscall_trace_enter. */
        add     r11, r0, CURRENT_TASK   /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO /* get thread info */
        lwi     r11, r11, TI_FLAGS      /* get flags in thread info */
        andi    r11, r11, _TIF_WORK_SYSCALL_MASK
        addik   r3, r0, -ENOSYS         /* pre-set return value to -ENOSYS */
        swi     r3, r1, PTO + PT_R3
        brlid   r15, do_syscall_trace_enter
        addik   r5, r1, PTO + PT_R0     /* (delay slot) arg: saved regs */
        # do_syscall_trace_enter returns the new syscall nr.
        lwi     r5, r1, PTO+PT_R5;      /* reload possibly-modified args */
        lwi     r6, r1, PTO+PT_R6;
        lwi     r7, r1, PTO+PT_R7;
        lwi     r8, r1, PTO+PT_R8;
        lwi     r9, r1, PTO+PT_R9;
        lwi     r10, r1, PTO+PT_R10;
        /* Jump to the appropriate function for the system call number in r12 (r12 is not preserved),
         * or return an error if r12 is not valid.  The LP register should point to the location where
         * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
        /* See if the system call number is valid. */
        addi    r11, r12, -__NR_syscalls;
        /* Figure out which function to use for this system call. */
        /* Note Microblaze barrel shift is optional, so don't rely on it */
        add     r12, r12, r12;          /* convert num -> ptr */
        /* Trace syscalls: store per-syscall counts to r0_ram (debug aid) */
        lwi     r3, r12, 0x400 + r0_ram
        swi     r3, r12, 0x400 + r0_ram
        # Find and jump into the syscall handler.
        lwi     r12, r12, sys_call_table
        la      r15, r0, ret_from_trap-8 /* where the trap should return need -8 to adjust for rtsd r15, 8*/
        /* The syscall number is invalid, return an error. */
        addi    r3, r0, -ENOSYS;
        rtsd    r15, 8;         /* looks like a normal subroutine return */
372 /* Entry point used to return from a syscall/trap. */
373 /* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
        /* Common syscall/trap return path: re-mask interrupts, run
         * pending work (trace/reschedule/signals) when returning to
         * user mode, then restore state and return via rtbd. */
        set_bip;                        /* Ints masked for state restore*/
        lwi     r11, r1, PTO+PT_MODE;
        /* See if returning to kernel mode, if so, skip resched &c. */
        /* We're returning to user mode, so check for various conditions that
         * trigger rescheduling. */
        # FIXME: Restructure all these flag checks.
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO; /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_WORK_SYSCALL_MASK
        swi     r3, r1, PTO + PT_R3     /* preserve syscall result across call */
        swi     r4, r1, PTO + PT_R4
        brlid   r15, do_syscall_trace_leave
        addik   r5, r1, PTO + PT_R0     /* (delay slot) arg: saved regs */
        lwi     r3, r1, PTO + PT_R3
        lwi     r4, r1, PTO + PT_R4
        /* We're returning to user mode, so check for various conditions that trigger rescheduling. */
        /* Get current task ptr into r11 */
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO; /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_NEED_RESCHED;
        swi     r3, r1, PTO + PT_R3;    /* store syscall result */
        swi     r4, r1, PTO + PT_R4;
        bralid  r15, schedule;          /* Call scheduler */
        nop;                            /* delay slot */
        lwi     r3, r1, PTO + PT_R3;    /* restore syscall result */
        lwi     r4, r1, PTO + PT_R4;
        /* Maybe handle a signal */
5:      add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO; /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_SIGPENDING;
        beqi    r11, 1f;                /* no signals pending -> skip handling */
        swi     r3, r1, PTO + PT_R3;    /* store syscall result */
        swi     r4, r1, PTO + PT_R4;
        la      r5, r1, PTO;            /* Arg 1: struct pt_regs *regs */
        add     r6, r0, r0;             /* Arg 2: sigset_t *oldset */
        addi    r7, r0, 1;              /* Arg 3: int in_syscall */
        bralid  r15, do_signal;         /* Handle any signals */
        lwi     r3, r1, PTO + PT_R3;    /* restore syscall result */
        lwi     r4, r1, PTO + PT_R4;
        /* Finally, return to user state. */
1:      swi     r0, r0, PER_CPU(KM);    /* Now officially in user state. */
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        swi     r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
        addik   r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
        lwi     r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
        /* Return to kernel state. */
        addik   r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
TRAP_return:    /* Make global symbol for debugging */
        rtbd    r14, 0; /* Instructions to return from an IRQ */
452 /* These syscalls need access to the struct pt_regs on the stack, so we
453 implement them in assembly (they're basically all wrappers anyway). */
C_ENTRY(sys_fork_wrapper):
        /* fork(2): tail-call do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0).
         * Bug fix: arg 3 had `add r8. r0, r0' (period instead of comma),
         * which is not valid assembler syntax. */
        addi    r5, r0, SIGCHLD         /* Arg 0: flags */
        lwi     r6, r1, PTO+PT_R1       /* Arg 1: child SP (use parent's) */
        la      r7, r1, PTO             /* Arg 2: parent context */
        add     r8, r0, r0              /* Arg 3: (unused) */
        add     r9, r0, r0;             /* Arg 4: (unused) */
        add     r10, r0, r0;            /* Arg 5: (unused) */
        brid    do_fork                 /* Do real work (tail-call) */
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
C_ENTRY(ret_from_fork):
        /* First code a new child runs.  r5 holds the previous task
         * (switch_thread's return value), forwarded to schedule_tail
         * via the delay slot; then return 0 to the child. */
        bralid  r15, schedule_tail;     /* ...which is schedule_tail's arg */
        add     r3, r5, r0;     /* switch_thread returns the prev task */
                                /* ( in the delay slot ) */
        add     r3, r0, r0;     /* Child's fork call should return 0. */
        brid    ret_from_trap;  /* Do normal trap return */
        brid    microblaze_vfork        /* Do real work (tail-call) */

        /* sys_clone wrapper body (entry label not visible in this chunk):
         * when the child-SP argument is 0, default to the parent's SP. */
        bnei    r6, 1f;                 /* See if child SP arg (arg 1) is 0. */
        lwi     r6, r1, PTO+PT_R1;      /* If so, use parent's stack ptr */
1:      la      r7, r1, PTO;            /* Arg 2: parent context */
        add     r8, r0, r0;             /* Arg 3: (unused) */
        add     r9, r0, r0;             /* Arg 4: (unused) */
        add     r10, r0, r0;            /* Arg 5: (unused) */
        brid    do_fork                 /* Do real work (tail-call) */

        /* execve wrapper body: pass the saved user context along. */
        la      r8, r1, PTO;            /* add user context as 4th arg */
        brid    microblaze_execve;      /* Do real work (tail-call).*/
C_ENTRY(sys_rt_sigsuspend_wrapper):
        /* rt_sigsuspend needs the saved pt_regs, passed as 3rd arg. */
        swi     r3, r1, PTO+PT_R3;      /* save r3, r4 around the call */
        swi     r4, r1, PTO+PT_R4;
        la      r7, r1, PTO;            /* add user context as 3rd arg */
        brlid   r15, sys_rt_sigsuspend; /* Do real work.*/
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        bri     ret_from_trap /* fall through will not work here due to align */
C_ENTRY(sys_rt_sigreturn_wrapper):
        /* rt_sigreturn needs the saved pt_regs, passed as 1st arg. */
        swi     r3, r1, PTO+PT_R3;      /* save r3, r4 around the call */
        swi     r4, r1, PTO+PT_R4;
        la      r5, r1, PTO;            /* add user context as 1st arg */
        brlid   r15, sys_rt_sigreturn   /* Do real work */
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        bri     ret_from_trap /* fall through will not work here due to align */
 * HW EXCEPTION routine start
        swi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */  \
        set_bip;        /*equalize initial state for all possible entries*/\
        /* See if already in kernel mode.*/                             \
        lwi     r11, r0, TOPHYS(PER_CPU(KM));                           \
        beqi    r11, 1f;        /* Jump ahead if coming from user */    \
        /* Kernel-mode state save. */                                   \
        /* Reload kernel stack-ptr. */                                  \
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP));                     \
        swi     r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */       \
        lwi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
        addik   r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ \
        /* store return registers separately because                    \
         * this macro is used by other exception paths too */           \
        swi     r3, r1, PTO + PT_R3;                                    \
        swi     r4, r1, PTO + PT_R4;                                    \
        /* PC, before IRQ/trap - this is one instruction above */       \
        swi     r17, r1, PTO+PT_PC;                                     \
        addi    r11, r0, 1;     /* Was in kernel-mode. */               \
        swi     r11, r1, PTO+PT_MODE;                                   \
        nop;    /* Fill delay slot */                                   \
1:      /* User-mode state save. */                                     \
        lwi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
        lwi     r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
        lwi     r1, r1, TS_THREAD_INFO; /* get the thread info */       \
        addik   r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
        addik   r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ \
        /* store return registers separately because this macro         \
         * is used by other exception paths too */                      \
        swi     r3, r1, PTO + PT_R3;                                    \
        swi     r4, r1, PTO + PT_R4;                                    \
        /* PC, before IRQ/trap - this is one instruction above FIXME*/  \
        swi     r17, r1, PTO+PT_PC;                                     \
        swi     r0, r1, PTO+PT_MODE; /* Was in user-mode. */            \
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP));                     \
        swi     r11, r1, PTO+PT_R1; /* Store user SP. */                \
        swi     r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:      lwi     r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
        /* Save away the syscall number. */                             \
        swi     r0, r1, PTO+PT_R0; \
C_ENTRY(full_exception_trap):
        /* Generic HW exception entry: save state, then dispatch to the
         * C handler full_exception() with the saved register frame. */
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
        /* adjust exception address for privileged instruction
         * so we can tell where it occurred */
        SAVE_STATE /* Save registers */
        /* FIXME this can be store directly in PT_ESR reg.
         * I tested it but there is a fault */
        /* where the trap should return need -8 to adjust for rtsd r15, 8 */
        la      r15, r0, ret_from_exc - 8
        la      r5, r1, PTO             /* parameter struct pt_regs * regs */
        mfs     r7, rfsr;               /* save FSR */
        la      r12, r0, full_exception
599 * Unaligned data trap.
601 * Unaligned data trap last on 4k page is handled here.
603 * Trap entered via exception, so EE bit is set, and interrupts
604 * are masked. This is nice, means we don't have to CLI before state save
606 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
C_ENTRY(unaligned_data_trap):
        /* Unaligned-access exception: entered with EE set and interrupts
         * masked; hands ESR/EAR to _unaligned_data_exception. */
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
        SAVE_STATE              /* Save registers.*/
        /* where the trap should return need -8 to adjust for rtsd r15, 8 */
        la      r15, r0, ret_from_exc-8
        mfs     r3, resr        /* ESR */
        mfs     r4, rear        /* EAR */
        la      r7, r1, PTO     /* parameter struct pt_regs * regs */
        la      r12, r0, _unaligned_data_exception
        rtbd    r12, 0; /* interrupts enabled */
626 * If the real exception handler (from hw_exception_handler.S) didn't find
627 * the mapping for the process, then we're thrown here to handle such situation.
629 * Trap entered via exceptions, so EE bit is set, and interrupts
630 * are masked. This is nice, means we don't have to CLI before state save
632 * Build a standard exception frame for TLB Access errors. All TLB exceptions
633 * will bail out to this point if they can't resolve the lightweight TLB fault.
635 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
636 * void do_page_fault(struct pt_regs *regs,
637 * unsigned long address,
638 * unsigned long error_code)
/* data and instruction trap - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
        /* Data-side TLB/page fault: call do_page_fault(regs, EAR, ESR). */
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
        SAVE_STATE              /* Save registers.*/
        /* where the trap should return need -8 to adjust for rtsd r15, 8 */
        la      r15, r0, ret_from_exc-8
        la      r5, r1, PTO     /* parameter struct pt_regs * regs */
        mfs     r6, rear        /* parameter unsigned long address */
        mfs     r7, resr        /* parameter unsigned long error_code */
        la      r12, r0, do_page_fault
        rtbd    r12, 0; /* interrupts enabled */
C_ENTRY(page_fault_instr_trap):
        /* Instruction-side TLB/page fault: like the data case, but the
         * error code is always 0. */
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
        SAVE_STATE              /* Save registers.*/
        /* where the trap should return need -8 to adjust for rtsd r15, 8 */
        la      r15, r0, ret_from_exc-8
        la      r5, r1, PTO     /* parameter struct pt_regs * regs */
        mfs     r6, rear        /* parameter unsigned long address */
        ori     r7, r0, 0       /* parameter unsigned long error_code */
        la      r12, r0, do_page_fault
        rtbd    r12, 0; /* interrupts enabled */
670 /* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
        /* Common return path for HW exceptions; mirrors ret_from_trap
         * but also restores the r3/r4 pair saved by SAVE_STATE. */
        set_bip;                        /* Ints masked for state restore*/
        lwi     r11, r1, PTO+PT_MODE;
        bnei    r11, 2f;                /* See if returning to kernel mode, */
                                        /* ... if so, skip resched &c. */
        /* We're returning to user mode, so check for various conditions that
           trigger rescheduling. */
        /* Get current task ptr into r11 */
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO;       /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_NEED_RESCHED;
        /* Call the scheduler before returning from a syscall/trap. */
        bralid  r15, schedule;          /* Call scheduler */
        nop;                            /* delay slot */
        /* Maybe handle a signal */
5:      add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO;       /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_SIGPENDING;
        beqi    r11, 1f;                /* no signals pending -> skip handling */
        /*
         * Handle a signal return; Pending signals should be in r18.
         *
         * Not all registers are saved by the normal trap/interrupt entry
         * points (for instance, call-saved registers (because the normal
         * C-compiler calling sequence in the kernel makes sure they're
         * preserved), and call-clobbered registers in the case of
         * traps), but signal handlers may want to examine or change the
         * complete register state.  Here we save anything not saved by
         * the normal entry sequence, so that it may be safely restored
         * (in a possibly modified form) after do_signal returns.
         * Store return registers separately because this macro is used
         * by other exception paths too. */
        swi     r3, r1, PTO + PT_R3;
        swi     r4, r1, PTO + PT_R4;
        la      r5, r1, PTO;            /* Arg 1: struct pt_regs *regs */
        add     r6, r0, r0;             /* Arg 2: sigset_t *oldset */
        addi    r7, r0, 0;              /* Arg 3: int in_syscall */
        bralid  r15, do_signal;         /* Handle any signals */
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        /* Finally, return to user state. */
1:      swi     r0, r0, PER_CPU(KM);    /* Now officially in user state. */
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        swi     r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        addik   r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
        lwi     r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
        /* Return to kernel state. */
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        addik   r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
EXC_return:     /* Make global symbol for debugging */
        rtbd    r14, 0; /* Instructions to return from an IRQ */
 * HW EXCEPTION routine end
753 * Hardware maskable interrupts.
755 * The stack-pointer (r1) should have already been saved to the memory
756 * location PER_CPU(ENTRY_SP).
        /* Hardware-interrupt entry body.
         * NOTE(review): the C_ENTRY(_interrupt) label itself is not
         * visible in this chunk of the file. */
        /* MS: we are in physical address */
        /* Save registers, switch to proper stack, convert SP to virtual.*/
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
        swi     r11, r0, TOPHYS(PER_CPU(R11_SAVE));
        /* MS: See if already in kernel mode. */
        lwi     r11, r0, TOPHYS(PER_CPU(KM));
        beqi    r11, 1f;        /* MS: Jump ahead if coming from user */
        /* Kernel-mode state save. */
        tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
        /* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
        swi     r11, r1, (PT_R1 - PT_SIZE);
        /* MS: restore r11 because of saving in SAVE_REGS */
        lwi     r11, r0, TOPHYS(PER_CPU(R11_SAVE));
        /* MS: Make room on the stack -> activation record */
        addik   r1, r1, -STATE_SAVE_SIZE;
        /* MS: store return registers separately because
         * this macro is used by other exception paths too */
        swi     r3, r1, PTO + PT_R3;
        swi     r4, r1, PTO + PT_R4;
        addi    r11, r0, 1;     /* MS: Was in kernel-mode. */
        swi     r11, r1, PTO + PT_MODE; /* MS: and save it */
        nop;    /* MS: Fill delay slot */
        /* User-mode state save. */
        /* MS: restore r11 -> FIXME move before SAVE_REG */
        lwi     r11, r0, TOPHYS(PER_CPU(R11_SAVE));
        /* MS: get the saved current */
        lwi     r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
        lwi     r1, r1, TS_THREAD_INFO;
        addik   r1, r1, THREAD_SIZE;    /* kernel stack top of the task */
        addik   r1, r1, -STATE_SAVE_SIZE;
        swi     r3, r1, PTO+PT_R3;
        swi     r4, r1, PTO+PT_R4;
        swi     r0, r1, PTO + PT_MODE;  /* was in user mode */
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
        swi     r11, r1, PTO+PT_R1;     /* store user SP */
        /* setup kernel mode to KM */
        swi     r11, r0, TOPHYS(PER_CPU(KM));
        lwi     r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* current -> r31 */
        swi     r0, r1, PTO + PT_R0;
        la      r15, r0, irq_call;
irq_call:rtbd   r11, 0;
        /* MS: we are in virtual mode */
        lwi     r11, r1, PTO + PT_MODE;
        add     r11, r0, CURRENT_TASK;
        lwi     r11, r11, TS_THREAD_INFO;
        lwi     r11, r11, TI_FLAGS;     /* MS: get flags from thread info */
        andi    r11, r11, _TIF_NEED_RESCHED;
        bralid  r15, schedule;
        nop;    /* delay slot */
        /* Maybe handle a signal */
5:      add     r11, r0, CURRENT_TASK;
        lwi     r11, r11, TS_THREAD_INFO;       /* MS: get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_SIGPENDING;
        beqid   r11, no_intr_resched    /* no signals pending -> skip */
        /* Handle a signal return; Pending signals should be in r18. */
        addi    r7, r0, 0;              /* Arg 3: int in_syscall */
        la      r5, r1, PTO;            /* Arg 1: struct pt_regs *regs */
        bralid  r15, do_signal;         /* Handle any signals */
        add     r6, r0, r0;             /* Arg 2: sigset_t *oldset (delay slot) */
        /* Finally, return to user state. */
        /* Disable interrupts, we are now committed to the state restore */
        swi     r0, r0, PER_CPU(KM);    /* MS: Now officially in user state. */
        add     r11, r0, CURRENT_TASK;
        swi     r11, r0, PER_CPU(CURRENT_SAVE);
        lwi     r3, r1, PTO + PT_R3;    /* MS: restore saved r3, r4 registers */
        lwi     r4, r1, PTO + PT_R4;
        addik   r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
        lwi     r1, r1, PT_R1 - PT_SIZE; /* MS: restore user stack pointer */
        /* MS: Return to kernel state. */
2:      VM_OFF /* MS: turn off MMU */
        lwi     r3, r1, PTO + PT_R3;    /* MS: restore saved r3, r4 registers */
        lwi     r4, r1, PTO + PT_R4;
        addik   r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
IRQ_return:     /* MS: Make global symbol for debugging */
877 * We enter dbtrap in "BIP" (breakpoint) mode.
878 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
880 * however, wait to save state first
C_ENTRY(_debug_exception):
        /* Debug/breakpoint trap: save state, deliver SIGTRAP to the
         * current task, then run the usual resched/signal return path. */
        /* BIP bit is set on entry, no interrupts can occur */
        swi     r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
        swi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
        set_bip;        /*equalize initial state for all possible entries*/
        lwi     r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
        beqi    r11, 1f;        /* Jump ahead if coming from user */
        /* Kernel-mode state save. */
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
        swi     r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
        lwi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
        addik   r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
        swi     r3, r1, PTO + PT_R3;
        swi     r4, r1, PTO + PT_R4;
        addi    r11, r0, 1;     /* Was in kernel-mode. */
        swi     r11, r1, PTO + PT_MODE;
        nop;    /* Fill delay slot */
1:      /* User-mode state save. */
        lwi     r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
        lwi     r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
        lwi     r1, r1, TS_THREAD_INFO; /* get the thread info */
        addik   r1, r1, THREAD_SIZE;    /* calculate kernel stack pointer */
        addik   r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
        swi     r3, r1, PTO + PT_R3;
        swi     r4, r1, PTO + PT_R4;
        swi     r0, r1, PTO+PT_MODE;    /* Was in user-mode. */
        lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
        swi     r11, r1, PTO+PT_R1;     /* Store user SP. */
        swi     r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2:      lwi     r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
        /* Save away the syscall number. */
        swi     r0, r1, PTO+PT_R0;
        /* Deliver SIGTRAP: send_sig(SIGTRAP, current, 0) */
        addi    r5, r0, SIGTRAP         /* send the trap signal */
        add     r6, r0, CURRENT_TASK;   /* Arg 2: current task */
        addk    r7, r0, r0              /* 3rd param zero */
        la      r11, r0, send_sig;
        la      r15, r0, dbtrap_call;
dbtrap_call:    rtbd    r11, 0;
        set_bip;                        /* Ints masked for state restore*/
        lwi     r11, r1, PTO+PT_MODE;
        /* Get current task ptr into r11 */
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO;       /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_NEED_RESCHED;
        /* Call the scheduler before returning from a syscall/trap. */
        bralid  r15, schedule;          /* Call scheduler */
        nop;                            /* delay slot */
        /* XXX Is PT_DTRACE handling needed here? */
        /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
        /* Maybe handle a signal */
5:      add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        lwi     r11, r11, TS_THREAD_INFO;       /* get thread info */
        lwi     r11, r11, TI_FLAGS;     /* get flags in thread info */
        andi    r11, r11, _TIF_SIGPENDING;
        beqi    r11, 1f;                /* no signals pending -> skip handling */
        /* Handle a signal return; Pending signals should be in r18. */
        /* Not all registers are saved by the normal trap/interrupt entry
           points (for instance, call-saved registers (because the normal
           C-compiler calling sequence in the kernel makes sure they're
           preserved), and call-clobbered registers in the case of
           traps), but signal handlers may want to examine or change the
           complete register state.  Here we save anything not saved by
           the normal entry sequence, so that it may be safely restored
           (in a possibly modified form) after do_signal returns. */
        la      r5, r1, PTO;            /* Arg 1: struct pt_regs *regs */
        add     r6, r0, r0;             /* Arg 2: sigset_t *oldset */
        addi    r7, r0, 0;              /* Arg 3: int in_syscall */
        bralid  r15, do_signal;         /* Handle any signals */
        /* Finally, return to user state. */
1:      swi     r0, r0, PER_CPU(KM);    /* Now officially in user state. */
        add     r11, r0, CURRENT_TASK;  /* Get current task ptr into r11 */
        swi     r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        addik   r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
        lwi     r1, r1, PT_R1 - PT_SIZE;
        /* Restore user stack pointer. */
        /* Return to kernel state. */
        lwi     r3, r1, PTO+PT_R3;      /* restore saved r3, r4 registers */
        lwi     r4, r1, PTO+PT_R4;
        addik   r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
DBTRAP_return:  /* Make global symbol for debugging */
        rtbd    r14, 0; /* Instructions to return from an IRQ */
        /* _switch_to body (entry label not visible in this chunk):
         * save the outgoing task's non-volatile state into its
         * thread_info->cpu_context, then load the incoming task's. */
        /* prepare return value */
        /* save registers in cpu_context */
        /* use r11 and r12, volatile registers, as temp register */
        /* give start of cpu_context for previous process */
        addik   r11, r5, TI_CPU_CONTEXT
        /* skip volatile registers.
         * they are saved on stack when we jumped to _switch_to() */
        /* dedicated registers */
        swi     r13, r11, CC_R13
        swi     r14, r11, CC_R14
        swi     r15, r11, CC_R15
        swi     r16, r11, CC_R16
        swi     r17, r11, CC_R17
        swi     r18, r11, CC_R18
        /* save non-volatile registers */
        swi     r19, r11, CC_R19
        swi     r20, r11, CC_R20
        swi     r21, r11, CC_R21
        swi     r22, r11, CC_R22
        swi     r23, r11, CC_R23
        swi     r24, r11, CC_R24
        swi     r25, r11, CC_R25
        swi     r26, r11, CC_R26
        swi     r27, r11, CC_R27
        swi     r28, r11, CC_R28
        swi     r29, r11, CC_R29
        swi     r30, r11, CC_R30
        /* special purpose registers (r12 reloaded via mfs before each store) */
        swi     r12, r11, CC_MSR
        swi     r12, r11, CC_EAR
        swi     r12, r11, CC_ESR
        swi     r12, r11, CC_FSR
        /* update r31, the current */
        lwi     r31, r6, TI_TASK/* give me pointer to task which will be next */
        /* stored it to current_save too */
        swi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* get new process' cpu context and restore */
        /* give me start where start context of next task */
        addik   r11, r6, TI_CPU_CONTEXT
        /* non-volatile registers */
        lwi     r30, r11, CC_R30
        lwi     r29, r11, CC_R29
        lwi     r28, r11, CC_R28
        lwi     r27, r11, CC_R27
        lwi     r26, r11, CC_R26
        lwi     r25, r11, CC_R25
        lwi     r24, r11, CC_R24
        lwi     r23, r11, CC_R23
        lwi     r22, r11, CC_R22
        lwi     r21, r11, CC_R21
        lwi     r20, r11, CC_R20
        lwi     r19, r11, CC_R19
        /* dedicated registers */
        lwi     r18, r11, CC_R18
        lwi     r17, r11, CC_R17
        lwi     r16, r11, CC_R16
        lwi     r15, r11, CC_R15
        lwi     r14, r11, CC_R14
        lwi     r13, r11, CC_R13
        /* skip volatile registers */
        /* special purpose registers */
        lwi     r12, r11, CC_FSR
        lwi     r12, r11, CC_MSR
1106 brai 0x70; /* Jump back to FS-boot */
1111 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1114 swi r5, r0, 0x254 + TOPHYS(r0_ram)
/* Interrupt/exception vector table.  These are compiled and loaded into
 * high memory, then copied into place in mach_early_setup. */
        .section        .init.ivt, "ax"
        /* this is very important - here is the reset vector */
        /* in current MMU branch you don't care what is here - it is
         * used from bootloader side - but this is correct for FS-BOOT */
        brai    TOPHYS(_user_exception);        /* syscall handler */
        brai    TOPHYS(_interrupt);             /* Interrupt handler */
        brai    TOPHYS(_break);                 /* nmi trap handler */
        brai    TOPHYS(_hw_exception_handler);  /* HW exception handler */
        brai    TOPHYS(_debug_exception);       /* debug trap handler*/

        /* The syscall function-pointer table lives in .rodata. */
        .section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)