/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
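/*
 * On legacy iSeries, external interrupts are "soft-disabled": rather than
 * clearing MSR_EE, the kernel tracks an enabled/disabled flag in the PACA
 * (PACAPROCENABLED below) and replays anything that arrived while the
 * flag was clear.  DO_SOFT_DISABLE selects that model in this file.
 */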
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table
/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
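/*
 * The marker value (and the TC symbol name) is simply the ASCII string
 * "regshere" (0x72 'r', 0x65 'e', 0x67 'g', 0x73 's', ...), so a raw
 * stack dump shows a recognizable "regshere" doubleword just below each
 * exception frame.
 */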
	.section	".text"
	.align	7

	.globl system_call_common
system_call_common:
	addi	r1,r1,-INT_FRAME_SIZE
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
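	/* ACCOUNT_CPU_USER_ENTRY (ppc_asm.h) samples the timebase so the
	 * cycles just spent in user mode can be charged to the task when
	 * CONFIG_VIRT_CPU_ACCOUNTING is enabled; otherwise it expands to
	 * nothing.
	 */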
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
#endif
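/*
 * On iSeries an interrupt that must be replayed after soft-disable is
 * reinjected from kernel mode as the fake syscall number 0x5555.  The
 * crand above ANDs "r0 == 0x5555" (cr1.eq) with "not from user mode"
 * (cr0.eq) so exactly that combination is diverted to
 * hardware_interrupt_entry instead of the normal syscall path.
 */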
	addi	r9,r1,STACK_FRAME_OVERHEAD

	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys
system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */
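/*
 * In this kernel's layout each sys_call_table slot is a pair of
 * doublewords -- the native 64-bit entry point followed by the 32-bit
 * (compat) one -- hence the scaling of the syscall number by 16
 * (slwi ...,4) and the +8 used to select the second column for 32-bit
 * tasks.
 */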
syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
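	/* The store-conditional is executed purely for its side effect: it
	 * cancels any lwarx/ldarx reservation this CPU may still hold, so a
	 * stale atomic sequence in the resumed context cannot succeed by
	 * accident.
	 */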
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */
syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont
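	/* User space spots a failed syscall by the summary-overflow (SO) bit
	 * of CR0, not by the return value itself: or-ing 0x1000 into the
	 * upper half of the saved CR image sets exactly that bit, and r3 is
	 * negated so it carries the positive errno.
	 */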
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont
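/*
 * do_syscall_trace_enter gives a ptracer the chance to rewrite the
 * syscall number and its arguments, so everything is reloaded from the
 * saved frame before rejoining the normal dispatch path above.
 */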
syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f
	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
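	/* TI_FLAGS is also updated from interrupt context, so the
	 * per-syscall bits are cleared with an ldarx/stdcx. retry loop
	 * rather than a plain load/modify/store; the addi/subi pair just
	 * points the reservation directly at the flags word.
	 */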
4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite
	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
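/*
 * Exception prologues record an odd _TRAP value when they skip saving
 * r14-r31; save_nvgprs completes the frame and clears that low bit, which
 * is the same bit ret_from_except tests below before deciding whether it
 * needs REST_NVGPRS.
 */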
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
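/*
 * Each wrapper below therefore follows the same shape: save the
 * non-volatile GPRs, call the C handler, and leave through the common
 * syscall exit.
 */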
_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit
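/*
 * ret_from_fork is the first code a newly created task runs:
 * schedule_tail finishes the scheduler bookkeeping for it, and r3 is
 * cleared so fork/vfork/clone return 0 in the child, while the parent's
 * return value comes out of the normal syscall exit path.
 */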
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
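/*
 * Seen from C, this is the routine reached from __switch_to() in
 * process.c, roughly:
 *
 *	last = _switch(&prev->thread, &next->thread);
 *
 * and it hands back (in r3) the task that was switched away from.
 */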
#define SHADOW_SLB_BOLTED_STACK_ESID \
		(SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1))
#define SHADOW_SLB_BOLTED_STACK_VSID \
		(SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1) + 8)
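/*
 * Each entry in the SLB shadow buffer is an (esid, vsid) pair of
 * doublewords, i.e. 0x10 bytes, so entry i lives at offset
 * SLBSHADOW_SAVEAREA + 0x10*i; the kernel-stack entry is by convention
 * the last of the SLB_NUM_BOLTED bolted entries, which gives the two
 * offsets above.
 */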
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
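	/* A stack in the bolted c000... kernel segment, or one sharing the
	 * current stack's segment, is already covered by an existing SLB
	 * translation, so replacing the bolted entry would be wasted work.
	 */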
	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l

	/* Update the last bolted SLB */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Clear ESID */
	std	r7,SHADOW_SLB_BOLTED_STACK_VSID(r9)  /* Save VSID */
	std	r0,SHADOW_SLB_BOLTED_STACK_ESID(r9)  /* Save ESID */
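	/* The shadow entry is written in invalidate-then-update order: zero
	 * the ESID, store the new VSID, then store the new (valid) ESID, so
	 * the hypervisor can never replay a valid ESID paired with a stale
	 * VSID.
	 */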
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)
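	/* The 288 bytes are the PowerPC64 ELF ABI "red zone" a function may
	 * use below r1 without allocating a frame; asynchronous interrupt
	 * frames must skip it, but a deliberately built switch frame need
	 * not.  PACAKSAVE caches the kernel stack top for the next
	 * exception taken from user mode.
	 */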
	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
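	/* rldicl/rotldi is the standard trick for clearing a single MSR bit:
	 * rotate left by 48 so MSR_EE (0x8000) becomes the top bit, let the
	 * rldicl mask it off, then rotate by the remaining 16 bits to bring
	 * every other bit back home.
	 */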
#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work
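	/* The rlwimi folds the saved MSR_PR bit into the _TIF_SIGPENDING
	 * position of the mask, so the single and. tests NEED_RESCHED
	 * always but SIGPENDING only when the interrupt came from user
	 * mode -- signals are only delivered on the way back to user space.
	 */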
#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif
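	/* This is the consumer side of DO_SOFT_DISABLE: if the frame was
	 * soft-enabled and the hypervisor has queued an interrupt in the
	 * lppaca, it is delivered here through do_IRQ before the final
	 * return; otherwise the saved soft-enable state is just put back.
	 */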
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	ACCOUNT_CPU_USER_EXIT(r3, r4)

	stdcx.	r0,0,r1		/* to clear the reservation */

	rfid
	b	.	/* prevent speculative execution */
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r10,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
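	/* crandc computes "may preempt" in cr0.eq: preempt_count() must be
	 * zero (cr1.eq set) and interrupts must have been enabled at the
	 * interrupted point (cr0.eq clear), so one branch picks between
	 * preempting and a plain restore.
	 */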
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite
1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
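/*
 * The C wrapper in rtas.c passes the physical address of the rtas_args
 * block in r3; RTAS reads its token and arguments from there and writes
 * the results back, which is why r3 must survive untouched all the way
 * into RTAS.
 */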
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */
	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
	.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
	.previous
	.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz "enter_rtas"
	.previous
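	/* This is the open-coded form of WARN_ON(): tdnei traps if MSR_EE is
	 * still set, and the __bug_table entry (trap address, __LINE__ with
	 * the 0x1000000 warning flag or-ed in, plus file and function
	 * strings) lets the trap handler print a warning and continue.
	 */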
	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)
	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */
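	/* r0 is now the current MSR with EE/SE/BE/RI stripped -- safe to run
	 * on while SRR0/1 are live -- and r6 becomes the MSR RTAS itself
	 * runs with: translation off (IR/DR), 32-bit (SF clear), FP off.
	 * clrldi ...,2 above turns the 0xc... kernel linear address of the
	 * return point into the real address needed once translation is off.
	 */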
	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */
_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */
	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */
	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	/* Get the PROM entrypoint */
	mtlr	r4

	/* Switch MSR to 32 bits mode
	 */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync
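	/* Clearing MSR_SF drops the processor to 32-bit mode for PROM
	 * itself; MSR_ISF is cleared as well so that any interrupt taken
	 * while inside PROM is also delivered in 32-bit mode.
	 */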
	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl
	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */