3 #include "globalconfig.h"
4 #include "config_tcbsize.h"
9 /**********************************************************************
10 * calculate the TCB address from a stack pointer
@ NOTE(review): chunked view — interior lines of these macros (e.g. the
@ closing .endm) are not visible here; only the visible lines are annotated.
12 .macro CONTEXT_OF reg, ptr
@ Mask off the low bits of the stack pointer in two 8-bit-rotatable chunks,
@ since an ARM immediate cannot encode (THREAD_BLOCK_SIZE-1) in one bic.
13 bic \reg, \ptr, #((THREAD_BLOCK_SIZE-1) & 0xff)
14 bic \reg, \reg, #((THREAD_BLOCK_SIZE-1) & 0xff00)
17 /**********************************************************************
18 * Reset the thread cancel flag.
19 * Register r0 is scratched and contains the thread state afterwards
21 .macro RESET_THREAD_CANCEL_AT tcb
@ Load the thread state word; the actual clearing of the cancel bit happens
@ on a line not visible in this chunk (presumably a bic) before the store.
22 ldr r0, [\tcb, #(OFS__THREAD__STATE)]
24 str r0, [\tcb, #(OFS__THREAD__STATE)]
27 /****************************
28 * some handy definitions
@ RF(reg, offs): byte offset of register slot `reg` inside a return frame
@ that starts `offs` bytes up the stack (RF_##reg constants defined elsewhere).
36 #define RF(reg, offs) (RF_##reg + (offs))
38 /**************************************************************************
39 * Enter kernel mode (i.e. switch from any exception mode to the
40 * kernel mode and transfer the exception state).
43 .macro atomic_fixup insn do_store_pc
44 #ifndef CONFIG_ARM_V6PLUS
45 @ Adjust PC if it is in the special atomic insns area
46 @ Zero-flag set after this fixup code
@ Pre-v6 cores lack ldrex/strex: user atomic sequences live in a fixed
@ kernel-provided region, and a PC preempted inside it is rolled back here.
47 cmp \insn, #0xffffe000
49 cmp \insn, #0xfffff000
@ Round the PC in \insn down to the start of its atomic sequence
@ (conditional on the range checks above; interior lines not visible here).
52 biceq \insn, \insn, #0x0ff
54 str \insn, [sp, #RF(PC, RF_SIZE)]
57 @ ---------------------------------------------------
@ \reg: scratch register; \adjust: PC adjustment for this exception type;
@ \atomic_fixup / \not_svc: flags selecting optional fixup paths.
@ NOTE(review): many interior lines of this macro are not visible here.
61 .macro __switch_to_kernel reg adjust atomic_fixup not_svc
65 #ifdef CONFIG_ARM_V6PLUS
67 // todo: do clrex with strex for CPUs without clrex
72 #if defined(CONFIG_ARM_V6PLUS)
@ Enter supervisor mode with IRQ/FIQ masked (0xd3 = SVC mode, I and F set).
74 msr cpsr_c, #0xd3 @cpsid f, #0x13
82 .endif @ syscall (already in svc mode)
@ Transfer the banked exception PSR/PC into the kernel return frame.
86 str lr, [sp, #RF(PSR, -8)]
88 str lr, [sp, #RF(PC, -8)]
90 str lr, [sp, #RF(PC, -8)]
92 str lr, [sp, #RF(PSR, -8)]
@ Convenience wrapper: always uses r14 as the scratch register.
96 .macro switch_to_kernel adjust atomic_fixup not_svc
97 __switch_to_kernel r14 \adjust \atomic_fixup \not_svc
100 /*************************************************************************
101 * return from an exception
103 .macro return_from_exception
104 ldr lr, [sp, #RF(PSR,0)] @ Unstack SPSR
105 tst lr, #0x0f @ Mask all but relevant mode bits
@ Z set => returning to user mode; NE => returning into kernel (SVC) code,
@ which additionally needs the saved kernel lr restored below.
106 add sp, sp, #RF_SIZE @ SP to top of stack
107 #if defined(CONFIG_ARM_V6PLUS)
108 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
111 msr spsr_cfsx, lr @ Load SPSR from kernel_lr
112 ldr lr, [sp, #RF(PC, -RF_SIZE)] @ copy PC on psr field for
113 str lr, [sp, #RF(PSR, -RF_SIZE)] @ final ldmdb and proper ksp
114 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
115 ldmdb sp, {pc}^ @ go back to interrupted insn
121 /***********************************************************************
122 * Enter the kernel slowtrap handler
124 * Stack the state and call 'slowtrap_entry' with sp and error code
126 .macro enter_slowtrap_w_stack errorcode
@ Set the return address to the common exception_return path, then
@ jump into C code through the literal .LCslowtrap_entry.
130 adr lr, exception_return
131 ldr pc, .LCslowtrap_entry
@ Save the remaining GP registers first, then enter the path above.
134 .macro enter_slowtrap errorcode
135 stmdb sp!, {r0 - r12}
136 enter_slowtrap_w_stack \errorcode
142 * after SWITCH_TO_SVC !!!!
146 * | lr' | (pc after syscall)
154 * sp -> | sp^ | (user sp)
161 /*************************************************************************
163 * Generate stack for exception entries
164 * - Adjust return address
165 * - Store return address at [sp + 8]
166 * - Store spsr at [sp + 4]
168 * - Store user sp at [sp]
169 * - Store user lr at [sp + 4]
171 .macro exceptionframe
@ User sp/lr are no longer spilled eagerly here; they are saved lazily.
173 @ stmia sp, {sp,lr}^ @ now done lazy
177 /***************************************************************************
178 * Generate stack for system call entries
182 * after SWITCH_TO_SVC !!!!
186 * | lr^ | (pc after syscall)
194 * sp -> | sp^ | (user sp)
199 * lr: must contain fault addr (from switch_to_kernel)
@ Point lr at the PC slot of the return frame being built.
202 add lr, sp, #RF(PC, -8)
205 @ stmia sp, {sp}^ @ now done lazy
208 .macro enter_sys_call no_sys_call
@ Syscall "addresses" are small negative values; the unsigned compare
@ against -0x2a checks the range, else we branch to the non-syscall path.
209 ldr lr, [sp, #RF(PC, -8)]
210 cmn lr, #0x2a @ Range Check !!! UNSIGNED !!!
211 bls \no_sys_call @ no syscall
215 stmdb sp!, {r0 - r12}
217 RESET_THREAD_CANCEL_AT r1 @ sets r0 to state
220 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read exception PC from stack (km_lr)
221 adr r1, sys_call_table
@ Fast IRQ return: IRQs are disabled before the final user-state restore.
225 .global fast_ret_from_irq
227 2: ldmia sp, {r0 - r12}^
228 msr cpsr_c, #0xd3 // disable IRQs
232 ldr lr, [sp, #RF(PSR,0)]
234 @ ldmia sp, {sp,lr}^ @ done lazy
236 ldr lr, [sp, #RF(PC, -RF_SIZE)]
241 /**************************************************************************
242 * The Exception vector table.
@ Standard ARM vector layout; each slot holds a single branch.
245 .globl exception_vector
248 b undef_entry /* UNDEF */
249 b swi_entry /* SWI */
250 b inst_abort_entry /* IABORT */
251 b data_abort_entry /* DABORT */
253 b irq_entry /* IRQ */
254 b fiq_entry /* FIQ */
257 /* locations to pass lr and spsr from one mode to the other
258 these are globally shared !!! */
259 .section .excp.text,"xa"
263 /***************************************************************************
265 ** Exception entry points.
269 /***************************************************************************
272 * Exception is an undefined instruction.
@ Enter the kernel and raise a slowtrap with the UNDEF error code.
276 switch_to_kernel 0 0 1
278 enter_slowtrap 0x00100000
280 /**************************************************************************
283 * Exception is a software interrupt (typically a syscall in normal
288 switch_to_kernel 0 0 0
289 enter_sys_call no_sys_call
@ Not a system call: raise a slowtrap with the SWI error code.
292 enter_slowtrap 0x00200000
296 /***************************************************************************
297 * Exception inst_abort ()
299 * Exception is a prefetch (instruction) abort. This exception is also
300 * used for L4 syscalls. If the exception address is in the range 0x00
301 * to 0x24 (in the exception vector page), this is interpreted as a
302 * syscall number. Some platforms allow the exception vector to be
303 * relocated to the beginning of the last 64K of memory. For these
304 * platforms, we use a negative (i.e. end of address space) value to
305 * indicate the syscall number. If exception is not within the syscall
306 * range, generate a pager IPC (or panic if within the kernel).
311 switch_to_kernel 4 0 1
314 /**************************************************************************/
315 prefetch_abort: @ A real prefetch abort occurred --- handled as a page fault
317 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
318 ldr lr, [sp, #RF(PSR, 5*4)] @ get spsr from stack
319 ands lr, lr, #0x0f @ Mask all but relevant mode bits
320 bne kernel_prefetch_abort @ Kernel abort?
321 /* user prefetch abort */
322 mrc p15, 0, r1, c5, c0, 1 @ Load IFSR into r1
@ Build the pagefault error code: clear stale flags, mark read + prefetch.
323 bic r1, r1, #0x00ff0000
324 orr r1, r1, #0x00330000 @ Set read bit and prefetch abort
325 #if defined(CONFIG_ARM_V6PLUS)
326 mrc p15, 0, r0, c6, c0, 2 @ Read fault address, for T2: pfa != pc
328 ldr r0, [sp, #RF(PC, 5*4)] @ Get PC from RF and use as pfa
333 adr lr, pagefault_return
334 ldr pc, .LCpagefault_entry @ Jump to C code
336 kernel_prefetch_abort: @ Kernel generated IAbort
337 @ Should not get IAborts in kernel
@ Panic path: pass the message label to the nested trap handler.
339 adr r0, kernel_prefetch_abort_label
346 /****************************************************************************
347 * Exception data_abort ()
349 * Exception is a data abort. If exception happened in user mode,
350 * generate pager IPC. If exception happened in kernel mode, it is
351 * probably due to a non-mapped TCB (or else we panic).
@ Heuristic decode of the faulting instruction (in r3) to detect ldrd,
@ whose fault must not be reported as a write. Interior lines elided.
380 .macro check_ldrd_insn jmp_to_if_ldrd
383 and r12, r3, #0x000000f0
392 switch_to_kernel 8 0 1
395 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
397 /* user data abort */
398 #ifdef CONFIG_ARM_V6PLUS
@ v6+: the DFSR carries a reliable write/read bit, no insn decode needed.
399 mrc p15, 0, r1, c5, c0, 0 @ Load DFSR into r1
400 bic r1, r1, #0x00ff0000
401 mrc p15, 0, r0, c6, c0, 0 @ Load DFAR into r0
403 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
404 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
406 ands lr, lr, #0x0f @ Mask all but relevant mode bits
@ EQ => abort came from user mode; set the "user" bit in the error code.
409 orreq r1, r1, #0x00010000
410 orr r1, r1, #0x00400000 @ Set error code to data abort
413 adr lr, pagefault_return @ set return address
415 ldr pc, .LCpagefault_entry @ page fault
@ Pre-v6 path: FSR has no trustworthy write bit, so decode the insn.
417 mrc p15, 0, r1, c5, c0, 0 @ Load FSR into r1
418 bic r1, r1, #(1 << 11) @ clear bit 11 (write indicator)
419 bic r1, r1, #0x00ff0000
420 mrc p15, 0, r0, c6, c0, 0 @ Load FAR into r0
421 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
422 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
423 tst lr, #0x20 @ comes from thumb mode?
426 ldr r3, [r2] @ Load faulting insn
427 check_ldrd_insn .LCwas_ldrd
429 orreq r1, r1, #(1 << 11) @ Set FSR write bit
433 ands lr, lr, #0x0f @ Mask all but relevant mode bits
436 orreq r1, r1, #0x00010000
437 orr r1, r1, #0x00400000 @ Set error code to data abort
439 adr lr, pagefault_return @ set return address
441 ldr pc, .LCpagefault_entry @ page fault
448 beq .LCret_handle_thumb
450 orreq r1, r1, #(1 << 11) @ Set FSR write bit
451 b .LCret_handle_thumb
@ Literal pool: addresses of the C entry points called above.
454 .LCpagefault_entry: .word pagefault_entry
455 .LCslowtrap_entry: .word slowtrap_entry
458 /***************************************************************************
459 * Generic return code for restoring the thread state after exceptions.
479 * old sp -> | r0 | +0
@ Pagefault return path: EQ means the C pagefault handler requested an
@ upgrade to a slowtrap; otherwise restore user state and leave directly.
488 beq slowtrap_from_pagefault
490 msrne cpsr_c, #0xd3 // disable IRQs
491 ldmneia sp!, {r0 - r3, r12} @ Restore user state
492 return_from_exception
494 slowtrap_from_pagefault:
495 msr cpsr_c, #0xd3 // disable IRQs
@ Re-stack the full register set and divert into the slowtrap handler.
497 stmdb sp!, {r0 - r11}
500 adr lr, exception_return
501 ldr pc, .LCslowtrap_entry @ slow trap
503 .global __return_from_exception
504 __return_from_exception:
506 msr cpsr_c, #0xd3 // disable IRQs
508 ldmia sp!, {r0 - r12}
509 return_from_exception
514 return_from_exception
517 /***************************************************************************
520 * Exception is an interrupt. Generate interrupt IPC.
524 switch_to_kernel 4 1 1
527 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
528 @ add r0, sp, #(5*4) @ debug
531 ldmia sp, {r0 - r3, r12} @ Restore user state
532 msr cpsr_c, #0xd3 // disable IRQs
534 return_from_exception
540 /******************************************************************************
543 * Exception is a fast interrupt.
@ FIQ entry mirrors the IRQ entry above (interior lines elided).
547 switch_to_kernel 4 1 1
550 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
551 @ add r0, sp, #(5*4) @ debug
554 ldmia sp, {r0 - r3, r12} @ Restore user state
555 msr cpsr_c, #0xd3 // disable IRQs
557 return_from_exception
563 /**************************************************************************/
564 /* The alien stuff is below */
565 /**************************************************************************/
566 alien_syscall: @ Do it for an alien ---------------------------------------
@ NE => clear the "trap before syscall" marker bit in the saved state.
568 bicne r0, r0, #0x20000
570 @ Trap alien before system call -----------------------------------
571 @ The trap is an insn abort on the syscall address in the kernel.
572 ldr lr, [sp, #RF(PC, 13*4)]
573 str lr, [sp, #RF(USR_LR, 13*4)]
574 ldr lr, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
575 sub lr, lr, #4 @ adjust pc to be on insn
576 str lr, [sp, #RF(PC, 13*4)] @ store to entry_stack_PC
577 enter_slowtrap_w_stack 0x00300000
578 @ Never reach this -- end up in user land after exception reply
580 1: @ Resume the alien system call ------------------------------------
581 str r0, [r1, #(OFS__THREAD__STATE)]
582 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
583 adr r1, sys_call_table
587 2: nop @ The return point after the resumed alien system call --------
588 msr cpsr_c, #0xd3 // disable IRQs
589 @ Trap after the resumed alien system call ------------------------
590 @ The trap occurs at the insn where the system call returns to.
591 @ Set the bit 0x00010000 to indicate a trap after the resumed
593 enter_slowtrap_w_stack 0x00310000
596 /*****************************************************************************/
597 /* The syscall table stuff */
598 /*****************************************************************************/
599 #define SYSCALL(name) .word sys_##name##_wrapper
601 .globl sys_call_table
606 .word sys_ipc_wrapper
608 SYSCALL(invoke_debug)
617 .global leave_by_trigger_exception
619 leave_by_trigger_exception:
@ Build an exception state that replays the saved exception IP/PSR, then
@ enter the slowtrap handler with the "triggered exception" error code.
620 sub sp, sp, #RF_SIZE @ restore old return frame
621 stmdb sp!, {r0 - r12}
623 /* restore original IP */
625 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
626 str r0, [sp, #RF(PC, 13*4)]
628 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_PSR)]
629 str r0, [sp, #RF(PSR, 13*4)]
@ Invalidate the stored exception IP (the r0 value written here is set
@ on a line not visible in this chunk).
632 str r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
634 enter_slowtrap_w_stack 0x00500000
637 .global leave_by_vcpu_upcall;
639 leave_by_vcpu_upcall:
640 sub sp, sp, #RF_SIZE @ restore old return frame
643 /* restore original IP */
646 /* access_vcpu() for the local case */
@ r2 := pointer to the return-frame area inside the user vCPU state.
647 ldr r2, [r1, #(OFS__THREAD__USER_VCPU)]
648 add r2, r2, #(VAL__SIZEOF_TRAP_STATE - RF_SIZE)
@ Copy the saved exception IP/PSR into the vCPU state frame.
650 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
651 str r0, [r2, #RF(PC, 0)]
653 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_PSR)]
654 str r0, [r2, #RF(PSR, 0)]
655 bic r0, #0x20 // force ARM mode
656 str r0, [sp, #RF(PSR, 3*4)]
659 str r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
@ Copy user lr/sp from the kernel frame into the vCPU state frame.
661 ldr r0, [sp, #RF(USR_LR, 3*4)]
662 str r0, [r2, #RF(USR_LR, 0)]
664 ldr r0, [sp, #RF(USR_SP, 3*4)]
665 str r0, [r2, #RF(USR_SP, 0)]
@ Redirect the return frame to the vCPU entry SP/IP for the upcall.
680 add r0, r2, #(-8 + OFS__VCPU_STATE__ENTRY_SP)
683 ldr r0, [r2, #(-8 + OFS__VCPU_STATE__ENTRY_IP)]
685 str r0, [sp, #RF(PC, 0)]
691 kernel_prefetch_abort_label: .string "Kernel prefetch abort"
692 missed_excpt_ret_label: .string "ERROR in exception return"
693 fiq_label: .string "FIQ entry"
695 /**********************************************************************
697 **********************************************************************/
699 .macro DEBUGGER_ENTRY errorcode
@ Build a full return frame on the current stack and call the nested
@ trap handler (address held in the literal at local label 3 below).
701 str sp, [sp, #(RF(USR_SP, -RF_SIZE))] @ save r[13]
702 sub sp, sp, #(RF_SIZE)
704 str lr, [sp, #RF(SVC_LR, 0)]
705 str lr, [sp, #RF(PC, 0)]
707 str lr, [sp, #RF(PSR, 0)]
709 stmdb sp!, {r0 - r12}
711 mov r1, #\errorcode @ err
@ Handler call elided in this chunk; on return, unwind the frame:
719 add sp, sp, #8 @ pfa and err
720 ldmia sp!, {r0 - r12}
721 ldr lr, [sp, #RF(PSR, 0)]
723 ldr lr, [sp, #RF(SVC_LR, 0)]
725 ldr sp, [sp, #(RF(USR_SP, 0))]
729 3: .word call_nested_trap_handler
@ Kernel-debugger entry points, distinguished only by their error codes.
735 .global kern_kdebug_entry
738 DEBUGGER_ENTRY 0x00e00000
743 .global kern_kdebug_ipi_entry
745 kern_kdebug_ipi_entry:
746 DEBUGGER_ENTRY 0x00f00000
@ Instruction sync barrier via CP15 (pre-ISB-instruction encoding).
758 mcr p15, 0, lr, c7, c5, 4 @ cp15isb
762 /**********************************************************************
763 * Secure and Nonsecure switching stuff
765 *********************************************************************/
766 .macro SAVE_NONSECURE_STATE off
768 // save exit reason temporarily on stack
771 // switch to secure world
@ Writing CP15 SCR (c1,c1,0) toggles the TrustZone NS bit; the value in
@ lr/r1 is prepared on lines not visible in this chunk.
773 mcr p15, 0, lr, c1, c1, 0
778 stmia lr!, {r0 - r12}
794 stmia r0!, {r8 - r12, sp, lr}
818 // copy return pc/cpsr from stack
823 // save pending virtual interrupt state
824 mrc p15, 0, r1, c12, c1, 1
827 // switch to non-secure world
829 mcr p15, 0, r1, c1, c1, 0
@ Snapshot the non-secure banked CP15 state, one register at a time
@ (the interleaved stores are on lines not visible in this chunk).
832 mrc p15, 0, r1, c2, c0, 0 @ read CP15_TTB0
835 mrc p15, 0, r1, c2, c0, 1 @ read CP15_TTB1
838 mrc p15, 0, r1, c2, c0, 2 @ read CP15_TTBC
841 mrc p15, 0, r1, c12, c0, 0 @ read CP15_VECTOR_BASE
844 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
847 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
850 mrc p15, 0, r1, c5, c0, 1 @ read CP15_IFSR
853 mrc p15, 0, r1, c6, c0, 2 @ read CP15_IFAR
856 mrc p15, 0, r1, c1, c0, 0 @ read CP15_CONTROL
859 mrc p15, 0, r1, c10, c2, 0 @ read CP15_PRIM_REGION_REMAP
862 mrc p15, 0, r1, c10, c2, 1 @ read CP15_NORM_REGION_REMAP
865 mrc p15, 0, r1, c13, c0, 1 @ read CP15_CID
868 // switch to secure world
870 mcr p15, 0, r1, c1, c1, 0
873 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
876 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
879 // copy the exit reason from stack
884 .macro RESTORE_NONSECURE_STATE off
888 // jump over general purpose register
903 ldmia r0!, {r8 - r12, sp, lr}
927 // copy return pc/cpsr on stack
931 // set pending events
934 mcr p15, 0, r1, c12, c1, 1
937 // switch to non-secure world
939 mcr p15, 0, r1, c1, c1, 0
@ Restore the non-secure banked CP15 state saved by the macro above.
943 mcr p15, 0, r1, c2, c0, 0 @ write CP15_TTB0
946 mcr p15, 0, r1, c2, c0, 1 @ write CP15_TTB1
949 mcr p15, 0, r1, c2, c0, 2 @ write CP15_TTBC
952 mcr p15, 0, r1, c12, c0, 0 @ write CP15_VECTOR_BASE
955 mcr p15, 0, r1, c5, c0, 0 @ write CP15_DFSR
958 mcr p15, 0, r1, c6, c0, 0 @ write CP15_DFAR
961 mcr p15, 0, r1, c5, c0, 1 @ write CP15_IFSR
964 mcr p15, 0, r1, c6, c0, 2 @ write CP15_IFAR
967 mcr p15, 0, r1, c1, c0, 0 @ write CP15_CONTROL
970 mcr p15, 0, r1, c10, c2, 0 @ write CP15_PRIM_REGION_REMAP
973 mcr p15, 0, r1, c10, c2, 1 @ write CP15_NORM_REGION_REMAP
976 mcr p15, 0, r1, c13, c0, 1 @ write CP15_CID
978 // switch to secure world
980 mcr p15, 0, r1, c1, c1, 0
988 ldmia lr!, {r0 - r12}
991 /**********************************************************************
992 * Save secure state on top of the stack.
994 * We save also the user-level registers here, because we need to
995 * restore some on FIQ.
998 .macro SAVE_SECURE_STATE
1000 stmdb sp!, {r3, r4} @ save supervisor return values
@ User-mode sp/lr saved via the ^ form (no writeback allowed with ^).
1001 stmdb sp, {sp, lr}^ @ save user-level return values
1005 /**********************************************************************
1006 * Restore secure state when guest returns with monitor call.
1008 * This removes the secure state from the top of the stack.
1010 .macro RESTORE_SECURE_STATE
1012 mov r0, sp @ restore stack pointer from supervisor mode
1016 ldmia sp, {sp, lr}^ @ restore user-level return values
1018 ldmia sp!, {r3, r4} @ restore supervisor return values
1021 /**********************************************************************
1022 * Restore secure state when guest is interrupted by FIQ
1024 * Don't remove secure state from stack as we need it
1025 * when application guest exits.
1026 * Just restore user-level state as this is spilled by the irq handler
1028 .macro RESTORE_SECURE_STATE_FIQ
1030 mov r0, sp @ restore stack pointer from supervisor mode
1034 ldmia sp, {sp, lr}^ @ restore user-level return values
@ SCR writes flipping the NS bit; the value in lr is prepared on lines
@ not visible in this chunk.
1037 .macro SWITCH_TO_NONSECURE_MODE
1039 mcr p15, 0, lr, c1, c1, 0
1043 .macro SWITCH_TO_SECURE_MODE
1045 mcr p15, 0, lr, c1, c1, 0
1050 /*****************************************************************************/
1051 /* The monitor entry table stuff */
1052 /*****************************************************************************/
1054 .globl monitor_vector_base
1055 monitor_vector_base:
@ Monitor-mode vector table: one branch per exception source.
1057 b mon_undef_entry /* UNDEF */
1058 b mon_swi_entry /* SWI */
1059 b mon_inst_abort_entry /* IABORT */
1060 b mon_data_abort_entry /* DABORT */
1062 b mon_irq_entry /* IRQ */
1063 b mon_fiq_entry /* FIQ */
@ Each entry stores the return state and records a numeric exit reason
@ in lr for the secure-world handler (1=swi, 2=iabort, 3=dabort, 4=irq/fiq).
1070 srsdb sp, #0x16 @ save return state temporarily on stack
1071 mov lr, #1 @ set exit reason
1074 mon_inst_abort_entry:
1077 mov lr, #2 @ set exit reason
1080 mon_data_abort_entry:
1083 mov lr, #3 @ set exit reason
1089 mov lr, #4 @ set exit reason
1093 sub lr, lr, #4 @ adjust saved ip
1095 mov lr, #4 @ set exit reason
1098 // cps #0x12 @ switch to irq mode
1099 // adr lr, go_nonsecure_after_fiq + 4 @ set lr_irq
1100 // msr spsr, #0xd3 @ set spsr_irq
1103 /**********************************************************************
1104 * Go to secure world
1108 SAVE_NONSECURE_STATE 16
1109 RESTORE_SECURE_STATE
1115 /**********************************************************************
1116 * Go to nonsecure world
1118 * When the guest was interrupted by an FIQ, we don't need to save
1119 * secure state again, because it is still on top of the stack.
1122 //go_nonsecure_after_fiq:
1123 // mov r2, sp @ copy sp_svc to sv_mon
1127 // b go_nonsecure_after_fiq_2
1132 RESTORE_NONSECURE_STATE 16
1133 SWITCH_TO_NONSECURE_MODE
1135 // mcr p15, 0, lr, c7, c10, 4 @ drain write buffer
1136 // mcr p15, 0, lr, c8, c7, 0 @ flush TLB entry
@ Load the non-secure return PSR/PC (saved near the stack top) and return.
1139 msr spsr, lr @ set spsr_mon with unsecure spsr
1140 ldr lr, [sp, #-8] @ set lr_mon with unsecure ip
1144 /* -------------------------------------- TEXT ---------------------------*/
@ Final user-state restore: r1 points at the base of the saved trap frame
@ (set on a line not visible in this chunk); lr walks through it.
1149 add sp, r1, #RF_SIZE
1151 ldr r1, [lr, #RF(PSR, 13*4)] @ Unstack SPSR
1152 msr spsr, r1 @ Load SPSR from kernel_lr
1153 ldmia lr!, {r0 - r12}
1154 ldmia lr, {sp,lr}^ @ restore user sp and lr (now lazy)
1155 #if defined(CONFIG_ARM_V6PLUS)
1156 add lr, lr, #RF_SIZE @ Read return address
1159 add lr, lr, #(RF_SIZE - 4) @ Read return address
1160 ldmdb lr, {pc}^ @ go back to interrupted insn