3 #include "globalconfig.h"
4 #include "config_tcbsize.h"
9 /**********************************************************************
10 * calculate the TCB address from a stack pointer
@ CONTEXT_OF reg, ptr: compute the TCB (context) base address that contains
@ stack pointer \ptr by clearing the low bits. THREAD_BLOCK_SIZE must be a
@ power of two; the mask is split across two BICs because an ARM immediate
@ is limited to a rotated 8-bit field.
@ NOTE(review): the macro's .endm lies in lines elided from this excerpt.
12 .macro CONTEXT_OF reg, ptr
13 bic \reg, \ptr, #((THREAD_BLOCK_SIZE-1) & 0xff)
14 bic \reg, \reg, #((THREAD_BLOCK_SIZE-1) & 0xff00)
17 /**********************************************************************
18 * Reset the thread cancel flag.
19 * Register r0 is scratched and contains the thread state afterwards
@ RESET_THREAD_CANCEL_AT tcb: read the thread-state word of \tcb, clear the
@ cancel flag, and store it back. r0 is clobbered and holds the state word
@ afterwards (callers below rely on that).
@ NOTE(review): the BIC that actually clears the flag (and .endm) are in
@ elided lines — confirm against full source.
21 .macro RESET_THREAD_CANCEL_AT tcb
22 ldr r0, [\tcb, #(OFS__THREAD__STATE)]
24 str r0, [\tcb, #(OFS__THREAD__STATE)]
27 /****************************
28 * some handy definitions
/* RF(reg, offs): byte offset of saved register <reg> inside a return frame,
 * relative to base offset <offs>. The RF_* constants it pastes are defined
 * in lines not shown here. */
36 #define RF(reg, offs) (RF_##reg + (offs))
38 /**************************************************************************
39 * Enter kernel mode (i.e. switch from any exception mode to the
40 * kernel mode and transfer the exception state).
@ atomic_fixup insn do_store_pc: pre-ARMv6 CPUs have no ldrex/strex, so user
@ atomic sequences live in a dedicated kernel-provided region near the top of
@ the address space; if the interrupted PC (\insn) falls inside it, roll it
@ back to the start of the sequence so the sequence restarts atomically.
@ NOTE(review): several interior lines are elided; the Z flag is documented
@ to be set after this fixup code.
43 .macro atomic_fixup insn do_store_pc
44 #ifndef CONFIG_ARM_V6PLUS
45 @ Adjust PC if it is in the special atomic insns area
46 @ Zero-flag set after this fixup code
47 cmp \insn, #0xffffe000
49 cmp \insn, #0xfffff000
52 biceq \insn, \insn, #0x0ff
54 str \insn, [sp, #RF(PC, RF_SIZE)]
57 @ ---------------------------------------------------
@ __switch_to_kernel reg adjust atomic_fixup not_svc:
@ Enter kernel (SVC) mode from an arbitrary exception mode and transfer the
@ exception state (banked lr/spsr) into the SVC-mode return frame.
@ 0xd3 = SVC mode with IRQ and FIQ masked.
@ NOTE(review): many interior lines (mode switch, state copy, .endm) are
@ elided here; the duplicated PC/PSR stores below belong to different
@ conditional paths — confirm against full source.
61 .macro __switch_to_kernel reg adjust atomic_fixup not_svc
65 #ifdef CONFIG_ARM_V6PLUS
67 // todo: do clrex with strex for CPUs without clrex
72 #if defined(CONFIG_ARM_V6PLUS)
74 msr cpsr_c, #0xd3 @cpsid f, #0x13
82 .endif @ syscall (already in svc mode)
86 str lr, [sp, #RF(PSR, -8)]
88 str lr, [sp, #RF(PC, -8)]
90 str lr, [sp, #RF(PC, -8)]
92 str lr, [sp, #RF(PSR, -8)]
@ switch_to_kernel: convenience wrapper around __switch_to_kernel using
@ r14 (lr) as the scratch/state register.
96 .macro switch_to_kernel adjust atomic_fixup not_svc
97 __switch_to_kernel r14 \adjust \atomic_fixup \not_svc
100 /*************************************************************************
101 * return from an exception
@ return_from_exception: restore SPSR from the return frame and resume the
@ interrupted instruction via the exception-return form ldmdb {pc}^ (which
@ also copies SPSR into CPSR). The `tst lr, #0x0f` distinguishes a return
@ to user mode (mode bits 0b0000) from a return into kernel/SVC mode, for
@ which the saved kernel lr must be reloaded (the `ldrne` lines).
@ NOTE(review): interior lines and the non-V6 variant are elided here.
104 ldr lr, [sp, #RF(PSR,0)] @ Unstack SPSR
105 tst lr, #0x0f @ Mask all but relevant mode bits
106 add sp, sp, #RF_SIZE @ SP to top of stack
107 #if defined(CONFIG_ARM_V6PLUS)
108 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
111 msr spsr_cfsx, lr @ Load SPSR from kernel_lr
112 ldr lr, [sp, #RF(PC, -RF_SIZE)] @ copy PC on psr field for
113 str lr, [sp, #RF(PSR, -RF_SIZE)] @ final ldmdb and proper ksp
114 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
115 ldmdb sp, {pc}^ @ go back to interrupted insn
121 /***********************************************************************
122 * Enter the kernel slowtrap handler
124 * Stack the state and call 'slowtrap_entry' with sp and error code
@ enter_slowtrap_w_stack errorcode: call the C slowtrap handler with the
@ current entry frame; lr is preset to exception_return so the handler
@ "returns" into the common exception-return path.
@ NOTE(review): the lines that set up r0/r1 (sp and \errorcode) are elided.
126 .macro enter_slowtrap_w_stack errorcode
130 adr lr, exception_return
131 ldr pc, .LCslowtrap_entry
@ enter_slowtrap errorcode: push the remaining GP registers r0-r12 onto the
@ entry stack, then enter the slowtrap handler with \errorcode.
134 .macro enter_slowtrap errorcode
135 stmdb sp!, {r0 - r12}
136 enter_slowtrap_w_stack \errorcode
142 * after SWITCH_TO_SVC !!!!
146 * | lr' | (pc after syscall)
154 * sp -> | sp^ | (user sp)
161 /*************************************************************************
163 * Generate stack for exception entries
164 * - Adjust return address
165 * - Store return address at [sp + 8]
166 * - Store spsr at [sp + 4]
168 * - Store user sp at [sp]
169 * - Store user lr at [sp + 4]
@ exceptionframe: build the return-frame portion for exception entries.
@ The user sp/lr store is intentionally omitted ("done lazy") — they are
@ spilled on demand elsewhere.
171 .macro exceptionframe
173 @ stmia sp, {sp,lr}^ @ now done lazy
177 /***************************************************************************
178 * Generate stack for system call entries
182 * after SWITCH_TO_SVC !!!!
186 * | lr^ | (pc after syscall)
194 * sp -> | sp^ | (user sp)
@ Fragment of the syscall-entry frame macro: lr must contain the fault
@ address delivered by switch_to_kernel; the user sp store is again lazy.
@ NOTE(review): the enclosing .macro header is in elided lines.
199 * lr: must contain fault addr (from switch_to_kernel)
202 add lr, sp, #RF(PC, -8)
205 @ stmia sp, {sp}^ @ now done lazy
@ enter_sys_call no_sys_call: decide whether the faulting PC encodes a
@ syscall. Syscall "addresses" sit at the very top of the address space, so
@ `cmn lr, #0x2a` compares lr against -0x2a (unsigned wrap); values below
@ the range branch to \no_sys_call. Otherwise spill r0-r12, clear the
@ thread cancel flag (r0 := state) and dispatch via sys_call_table.
@ NOTE(review): the TCB lookup that loads r1 and the dispatch jump are in
@ elided lines.
208 .macro enter_sys_call no_sys_call
209 ldr lr, [sp, #RF(PC, -8)]
210 cmn lr, #0x2a @ Range Check !!! UNSIGNED !!!
211 bls \no_sys_call @ no syscall
215 stmdb sp!, {r0 - r12}
217 RESET_THREAD_CANCEL_AT r1 @ sets r0 to state
220 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read exception PC from stack (km_lr)
221 adr r1, sys_call_table
@ fast_ret_from_irq: fast return path to user mode — reload user r0-r12,
@ mask interrupts (0xd3 = SVC, IRQ+FIQ off) and fall into the final
@ return sequence. User sp/lr restore is lazy.
@ NOTE(review): interior lines between these instructions are elided.
225 .global fast_ret_from_irq
227 2: ldmia sp, {r0 - r12}^
228 msr cpsr_c, #0xd3 // disable IRQs
232 ldr lr, [sp, #RF(PSR,0)]
234 @ ldmia sp, {sp,lr}^ @ done lazy
236 ldr lr, [sp, #RF(PC, -RF_SIZE)]
241 /**************************************************************************
242 * The Exception vector table.
@ ARM exception vector table: one branch per hardware exception slot, in
@ architectural order (the RESET slot precedes these in elided lines).
245 .globl exception_vector
248 b undef_entry /* UNDEF */
249 b swi_entry /* SWI */
250 b inst_abort_entry /* IABORT */
251 b data_abort_entry /* DABORT */
253 b irq_entry /* IRQ */
254 b fiq_entry /* FIQ */
257 /* locations to pass lr and spsr from one mode to the other
258 these are globally shared !!! */
259 .section .excp.text,"xa"
263 /***************************************************************************
265 ** Exception entry points.
269 /***************************************************************************
272 * Exception is an undefined instruction.
@ Undefined-instruction entry: switch to kernel mode, then raise a slowtrap
@ with error code 0x00100000 (UNDEF class).
276 switch_to_kernel 0 0 1
278 enter_slowtrap 0x00100000
280 /**************************************************************************
283 * Exception is a software interrupt (typically a syscall in normal
@ SWI entry: switch to kernel mode and try the syscall path; if the value
@ is not a valid syscall, fall through to a slowtrap with code 0x00200000.
288 switch_to_kernel 0 0 0
289 enter_sys_call no_sys_call
292 enter_slowtrap 0x00200000
296 /***************************************************************************
297 * Exception inst_abort ()
299 * Exception is a prefetch (instruction) abort. This exception is also
300 * used for L4 syscalls. If the exception address is in the range 0x00
301 * to 0x24 (in the exception vector page), this is interpreted as a
302 * syscall number. Some platforms allow the exception vector to be
303 * relocated to the beginning of the last 64K of memory. For these
304 * platforms, we use a negative (i.e. end of address space) value to
305 * indicate the syscall number. If exception is not within the syscall
306 * range, generate a pager IPC (or panic if within the kernel).
@ Instruction-abort entry: PC adjust of 4, then (in elided lines) the
@ syscall-range check; a genuine prefetch abort continues below.
311 switch_to_kernel 4 0 1
314 /**************************************************************************/
315 prefetch_abort: @ A real prefetch abort occurred --- handled as a page fault
317 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
318 ldr lr, [sp, #RF(PSR, 5*4)] @ get spsr from stack
319 ands lr, lr, #0x0f @ Mask all but relevant mode bits
320 bne kernel_prefetch_abort @ Kernel abort?
321 /* user prefetch abort */
@ Build pagefault_entry arguments: r0 = fault address (the aborted PC),
@ r1 = IFSR with the error-class bits forced to "read + prefetch abort".
322 mrc p15, 0, r1, c5, c0, 1 @ Load IFSR into r1
323 bic r1, r1, #0x00ff0000
324 orr r1, r1, #0x00330000 @ Set read bit and prefetch abort
325 ldr r0, [sp, #RF(PC, 5*4)] @ get PC from RF and use as pfa
@ Return into pagefault_return, enter the C page-fault handler.
329 adr lr, pagefault_return
330 ldr pc, .LCpagefault_entry @ Jump to C code
@ A prefetch abort raised while in kernel mode is always fatal: load the
@ panic message and (in elided lines) enter the kernel debugger/panic path.
332 kernel_prefetch_abort: @ Kernel generated IAbort
333 @ Should not get IAborts in kernel
335 adr r0, kernel_prefetch_abort_label
342 /****************************************************************************
343 * Exception data_abort ()
345 * Exception is a data abort. If exception happened in user mode,
346 * generate pager IPC. If exception happened in kernel mode, it is
347 * probably due to a non-mapped TCB (or else we panic).
@ check_ldrd_insn jmp_to_if_ldrd: decode the faulting instruction (in r3)
@ and branch to \jmp_to_if_ldrd when it is an LDRD, whose fault-status
@ write-bit handling differs. Only the opcode-field mask is visible here.
376 .macro check_ldrd_insn jmp_to_if_ldrd
379 and r12, r3, #0x000000f0
@ Data-abort entry: PC adjust of 8 (ARM data-abort lr offset), then build
@ the page-fault arguments: r0 = fault address, r1 = fault status + error
@ code, r2 = faulting PC.
388 switch_to_kernel 8 0 1
391 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
393 /* user data abort */
394 #ifdef CONFIG_ARM_V6PLUS
@ ARMv6+: DFSR bit 11 already distinguishes read/write, so no instruction
@ decoding is needed.
395 mrc p15, 0, r1, c5, c0, 0 @ Load DFSR into r1
396 bic r1, r1, #0x00ff0000
397 mrc p15, 0, r0, c6, c0, 0 @ Load DFAR into r0
399 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
400 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
402 ands lr, lr, #0x0f @ Mask all but relevant mode bits
405 orreq r1, r1, #0x00010000
406 orr r1, r1, #0x00400000 @ Set error code to data abort
409 adr lr, pagefault_return @ set return address
411 ldr pc, .LCpagefault_entry @ page fault
@ Pre-v6 path: the FSR has no reliable write indicator, so fetch and decode
@ the faulting instruction to reconstruct the read/write bit (bit 11).
413 mrc p15, 0, r1, c5, c0, 0 @ Load FSR into r1
414 bic r1, r1, #(1 << 11) @ clear bit 11 (write indicator)
415 bic r1, r1, #0x00ff0000
416 mrc p15, 0, r0, c6, c0, 0 @ Load FAR into r0
417 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
418 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
419 tst lr, #0x20 @ comes from thumb mode?
422 ldr r3, [r2] @ Load faulting insn
423 check_ldrd_insn .LCwas_ldrd
425 orreq r1, r1, #(1 << 11) @ Set FSR write bit
429 ands lr, lr, #0x0f @ Mask all but relevant mode bits
432 orreq r1, r1, #0x00010000
433 orr r1, r1, #0x00400000 @ Set error code to data abort
435 adr lr, pagefault_return @ set return address
437 ldr pc, .LCpagefault_entry @ page fault
@ Thumb-mode decode tail (16-bit instruction forms).
444 beq .LCret_handle_thumb
446 orreq r1, r1, #(1 << 11) @ Set FSR write bit
447 b .LCret_handle_thumb
@ Literal pool: addresses of the C handlers, loaded via `ldr pc, ...` above.
450 .LCpagefault_entry: .word pagefault_entry
451 .LCslowtrap_entry: .word slowtrap_entry
454 /***************************************************************************
455 * Generic return code for restoring the thread state after exceptions.
475 * old sp -> | r0 | +0
@ Return paths after the C page-fault handler (pagefault_return label is in
@ elided lines). A zero result means the fault was not resolved and is
@ escalated to a slowtrap; otherwise the user state is restored directly.
484 beq slowtrap_from_pagefault
486 msrne cpsr_c, #0xd3 // disable IRQs
487 ldmneia sp!, {r0 - r3, r12} @ Restore user state
488 return_from_exception
490 slowtrap_from_pagefault:
491 msr cpsr_c, #0xd3 // disable IRQs
@ Re-spill the remaining registers to form a full trap frame, then enter
@ the slowtrap handler; it "returns" to exception_return.
493 stmdb sp!, {r0 - r11}
496 adr lr, exception_return
497 ldr pc, .LCslowtrap_entry @ slow trap
@ Common full-frame exception return: restore r0-r12 and resume.
499 .global __return_from_exception
500 __return_from_exception:
502 msr cpsr_c, #0xd3 // disable IRQs
504 ldmia sp!, {r0 - r12}
505 return_from_exception
510 return_from_exception
513 /***************************************************************************
516 * Exception is an interrupt. Generate interrupt IPC.
@ IRQ entry: switch to kernel mode (PC adjust 4, atomic fixup enabled),
@ spill scratch registers, call the interrupt handler (elided lines), then
@ restore and return with interrupts masked.
520 switch_to_kernel 4 1 1
523 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
524 @ add r0, sp, #(5*4) @ debug
527 ldmia sp, {r0 - r3, r12} @ Restore user state
528 msr cpsr_c, #0xd3 // disable IRQs
530 return_from_exception
536 /******************************************************************************
539 * Exception is a fast interrupt.
@ FIQ entry: same shape as irq_entry — spill scratch state, handle the
@ fast interrupt (handler call in elided lines), restore and return.
543 switch_to_kernel 4 1 1
546 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
547 @ add r0, sp, #(5*4) @ debug
550 ldmia sp, {r0 - r3, r12} @ Restore user state
551 msr cpsr_c, #0xd3 // disable IRQs
553 return_from_exception
559 /**************************************************************************/
560 /* The alien stuff is below */
561 /**************************************************************************/
@ alien_syscall: syscall path for "alien" threads (threads whose syscalls
@ are trapped and reflected to a monitor task). First trap (0x00300000) is
@ raised before the syscall executes; after an optional resume, a second
@ trap (0x00310000) is raised at the return point.
562 alien_syscall: @ Do it for an alien ---------------------------------------
564 bicne r0, r0, #0x20000
566 @ Trap alien before system call -----------------------------------
567 @ The trap is an insn abort on the syscall address in the kernel.
568 ldr lr, [sp, #RF(PC, 13*4)]
569 str lr, [sp, #RF(USR_LR, 13*4)]
570 ldr lr, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
571 sub lr, lr, #4 @ adjust pc to be on insn
572 str lr, [sp, #RF(PC, 13*4)] @ store to entry_stack_PC
573 enter_slowtrap_w_stack 0x00300000
574 @ Never reach this -- end up in user land after exception reply
576 1: @ Resume the alien system call ------------------------------------
577 str r0, [r1, #(OFS__THREAD__STATE)]
578 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
579 adr r1, sys_call_table
583 2: nop @ The return point after the resumed alien system call --------
584 msr cpsr_c, #0xd3 // disable IRQs
585 @ Trap after the resumed alien system call ------------------------
586 @ The trap occurs at the insn where the system call returns to.
587 @ Set the bit 0x00010000 to indicate a trap after the resumed
589 enter_slowtrap_w_stack 0x00310000
592 /*****************************************************************************/
593 /* The syscall table stuff */
594 /*****************************************************************************/
@ Syscall dispatch table: one word per syscall, each pointing at the
@ corresponding sys_*_wrapper (further entries are in elided lines).
595 #define SYSCALL(name) .word sys_##name##_wrapper
597 .globl sys_call_table
602 .word sys_ipc_wrapper
604 SYSCALL(invoke_debug)
@ leave_by_trigger_exception: deliver a deferred exception to the current
@ thread. Rebuilds a return frame, swaps in the saved exception IP/PSR from
@ the TCB, and raises a slowtrap with code 0x00500000.
613 .global leave_by_trigger_exception
615 leave_by_trigger_exception:
616 sub sp, sp, #RF_SIZE @ restore old return frame
617 stmdb sp!, {r0 - r12}
619 /* restore original IP */
621 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
622 str r0, [sp, #RF(PC, 13*4)]
624 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_PSR)]
625 str r0, [sp, #RF(PSR, 13*4)]
@ Invalidate the stored exception IP (value of r0 set in an elided line).
628 str r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
630 enter_slowtrap_w_stack 0x00500000
@ leave_by_vcpu_upcall: redirect the thread into its vCPU entry point.
@ Copies the interrupted state (PC/PSR/user sp/lr) from the return frame
@ into the vCPU state area (r2), forces the upcall to run in ARM mode, and
@ finally replaces the frame's PC with the vCPU entry IP.
@ NOTE(review): r2 appears to be derived from the thread's local ID /
@ UTCB area — confirm the address arithmetic against full source.
633 .global leave_by_vcpu_upcall;
635 leave_by_vcpu_upcall:
636 sub sp, sp, #RF_SIZE @ restore old return frame
639 /* restore original IP */
642 /* access_vcpu() for the local case */
643 ldr r2, [r1, #(OFS__THREAD__LOCAL_ID)]
644 add r2, r2, #(OFS__THREAD__UTCB_SIZE + VAL__SIZEOF_TRAP_STATE - RF_SIZE)
646 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
647 str r0, [r2, #RF(PC, 0)]
649 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_PSR)]
650 str r0, [r2, #RF(PSR, 0)]
651 bic r0, #0x20 // force ARM mode
652 str r0, [sp, #RF(PSR, 3*4)]
655 str r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
657 ldr r0, [sp, #RF(USR_LR, 3*4)]
658 str r0, [r2, #RF(USR_LR, 0)]
660 ldr r0, [sp, #RF(USR_SP, 3*4)]
661 str r0, [r2, #RF(USR_SP, 0)]
676 add r0, r2, #(-8 + OFS__VCPU_STATE__ENTRY_SP)
679 ldr r0, [r2, #(-8 + OFS__VCPU_STATE__ENTRY_IP)]
681 str r0, [sp, #RF(PC, 0)]
@ Panic / debugger message strings referenced by the entry paths above.
686 kernel_prefetch_abort_label: .string "Kernel prefetch abort"
687 missed_excpt_ret_label: .string "ERROR in exception return"
688 fiq_label: .string "FIQ entry"
690 /**********************************************************************
692 **********************************************************************/
@ DEBUGGER_ENTRY errorcode: build a full trap frame on the current stack,
@ call the nested trap (kernel debugger) handler with \errorcode, then
@ unwind the frame and resume. Used by the kdebug entry points below.
694 .macro DEBUGGER_ENTRY errorcode
696 str sp, [sp, #(RF(USR_SP, -RF_SIZE))] @ save r[13]
697 sub sp, sp, #(RF_SIZE)
699 str lr, [sp, #RF(SVC_LR, 0)]
700 str lr, [sp, #RF(PC, 0)]
702 str lr, [sp, #RF(PSR, 0)]
704 stmdb sp!, {r0 - r12}
706 mov r1, #\errorcode @ err
@ Handler call is in elided lines; below is the unwind path.
714 add sp, sp, #8 @ pfa and err
715 ldmia sp!, {r0 - r12}
716 ldr lr, [sp, #RF(PSR, 0)]
718 ldr lr, [sp, #RF(SVC_LR, 0)]
720 ldr sp, [sp, #(RF(USR_SP, 0))]
724 3: .word call_nested_trap_handler
@ Kernel-debugger entry points: direct entry (0x00e00000) and IPI-triggered
@ entry (0x00f00000). The trailing mcr is an ISB via CP15 (c7, c5, 4) for
@ pre-barrier-instruction cores.
730 .global kern_kdebug_entry
733 DEBUGGER_ENTRY 0x00e00000
738 .global kern_kdebug_ipi_entry
740 kern_kdebug_ipi_entry:
741 DEBUGGER_ENTRY 0x00f00000
753 mcr p15, 0, lr, c7, c5, 4 @ cp15isb
757 /**********************************************************************
758 * Secure and Nonsecure switching stuff
760 *********************************************************************/
@ SAVE_NONSECURE_STATE off (TrustZone): capture the complete non-secure
@ world state — GP registers, banked registers, return pc/cpsr, pending
@ virtual interrupts, and the non-secure CP15 configuration — into the
@ save area. The SCR writes (c1, c1, 0) toggle the NS bit to read the
@ non-secure CP15 copies from the secure world.
@ NOTE(review): the stores pairing each mrc, and the save-area pointer
@ setup, are in elided lines.
761 .macro SAVE_NONSECURE_STATE off
763 // save exit reason temporarily on stack
766 // switch to secure world
768 mcr p15, 0, lr, c1, c1, 0
773 stmia lr!, {r0 - r12}
789 stmia r0!, {r8 - r12, sp, lr}
813 // copy return pc/cpsr from stack
818 // save pending virtual interrupt state
819 mrc p15, 0, r1, c12, c1, 1
822 // switch to non-secure world
824 mcr p15, 0, r1, c1, c1, 0
827 mrc p15, 0, r1, c2, c0, 0 @ read CP15_TTB0
830 mrc p15, 0, r1, c2, c0, 1 @ read CP15_TTB1
833 mrc p15, 0, r1, c2, c0, 2 @ read CP15_TTBC
836 mrc p15, 0, r1, c12, c0, 0 @ read CP15_VECTOR_BASE
839 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
842 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
845 mrc p15, 0, r1, c5, c0, 1 @ read CP15_IFSR
848 mrc p15, 0, r1, c6, c0, 2 @ read CP15_IFAR
851 mrc p15, 0, r1, c1, c0, 0 @ read CP15_CONTROL
854 mrc p15, 0, r1, c10, c2, 0 @ read CP15_PRIM_REGION_REMAP
857 mrc p15, 0, r1, c10, c2, 1 @ read CP15_NORM_REGION_REMAP
860 mrc p15, 0, r1, c13, c0, 1 @ read CP15_CID
863 // switch to secure world
865 mcr p15, 0, r1, c1, c1, 0
868 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
871 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
874 // copy the exit reason from stack
@ RESTORE_NONSECURE_STATE off: mirror image of SAVE_NONSECURE_STATE —
@ reload banked registers, return pc/cpsr, pending-event state and the
@ non-secure CP15 configuration, then restore r0-r12 last.
@ NOTE(review): the loads pairing each mcr are in elided lines.
879 .macro RESTORE_NONSECURE_STATE off
883 // jump over general purpose register
898 ldmia r0!, {r8 - r12, sp, lr}
922 // copy return pc/cpsr on stack
926 // set pending events
929 mcr p15, 0, r1, c12, c1, 1
932 // switch to non-secure world
934 mcr p15, 0, r1, c1, c1, 0
938 mcr p15, 0, r1, c2, c0, 0 @ write CP15_TTB0
941 mcr p15, 0, r1, c2, c0, 1 @ write CP15_TTB1
944 mcr p15, 0, r1, c2, c0, 2 @ write CP15_TTBC
947 mcr p15, 0, r1, c12, c0, 0 @ write CP15_VECTOR_BASE
950 mcr p15, 0, r1, c5, c0, 0 @ write CP15_DFSR
953 mcr p15, 0, r1, c6, c0, 0 @ write CP15_DFAR
956 mcr p15, 0, r1, c5, c0, 1 @ write CP15_IFSR
959 mcr p15, 0, r1, c6, c0, 2 @ write CP15_IFAR
962 mcr p15, 0, r1, c1, c0, 0 @ write CP15_CONTROL
965 mcr p15, 0, r1, c10, c2, 0 @ write CP15_PRIM_REGION_REMAP
968 mcr p15, 0, r1, c10, c2, 1 @ write CP15_NORM_REGION_REMAP
971 mcr p15, 0, r1, c13, c0, 1 @ write CP15_CID
973 // switch to secure world
975 mcr p15, 0, r1, c1, c1, 0
983 ldmia lr!, {r0 - r12}
986 /**********************************************************************
987 * Save secure state on top of the stack.
989 * We save also the user-level registers here, because we need to
990 * restore some on FIQ.
@ SAVE_SECURE_STATE: push secure-world return values on the monitor stack —
@ supervisor r3/r4 plus the user-level sp/lr, which must be restorable on
@ an FIQ exit.
993 .macro SAVE_SECURE_STATE
995 stmdb sp!, {r3, r4} @ save supervisor return values
996 stmdb sp, {sp, lr}^ @ save user-level return values
1000 /**********************************************************************
1001 * Restore secure state when guest returns with monitor call.
1003 * This removes the secure state from the top of the stack.
@ RESTORE_SECURE_STATE: pop the secure state saved by SAVE_SECURE_STATE,
@ removing it from the top of the stack (normal monitor-call return).
1005 .macro RESTORE_SECURE_STATE
1007 mov r0, sp @ restore stack pointer from supervisor mode
1011 ldmia sp, {sp, lr}^ @ restore user-level return values
1013 ldmia sp!, {r3, r4} @ restore supervisor return values
1016 /**********************************************************************
1017 * Restore secure state when guest is interrupted by FIQ
1019 * Don't remove secure state from stack as we need it
1020 * when application guest exits.
1021 * Just restore user-level state as this is spilled by the irq handler
@ RESTORE_SECURE_STATE_FIQ: like RESTORE_SECURE_STATE but keeps the saved
@ state on the stack (no pop) — it is still needed when the guest finally
@ exits; only the user-level sp/lr are reloaded for the FIQ handler.
1023 .macro RESTORE_SECURE_STATE_FIQ
1025 mov r0, sp @ restore stack pointer from supervisor mode
1029 ldmia sp, {sp, lr}^ @ restore user-level return values
@ SWITCH_TO_NONSECURE_MODE: write the SCR (CP15 c1, c1, 0) to set the NS
@ bit; the value in lr is prepared in elided lines.
1032 .macro SWITCH_TO_NONSECURE_MODE
1034 mcr p15, 0, lr, c1, c1, 0
@ SWITCH_TO_SECURE_MODE: write the SCR (CP15 c1, c1, 0) to clear the NS
@ bit; the value in lr is prepared in elided lines.
1038 .macro SWITCH_TO_SECURE_MODE
1040 mcr p15, 0, lr, c1, c1, 0
1045 /*****************************************************************************/
1046 /* The monitor entry table stuff */
1047 /*****************************************************************************/
@ TrustZone monitor-mode vector table, same slot layout as the main
@ exception vector above.
1049 .globl monitor_vector_base
1050 monitor_vector_base:
1052 b mon_undef_entry /* UNDEF */
1053 b mon_swi_entry /* SWI */
1054 b mon_inst_abort_entry /* IABORT */
1055 b mon_data_abort_entry /* DABORT */
1057 b mon_irq_entry /* IRQ */
1058 b mon_fiq_entry /* FIQ */
@ Monitor exception entries: each stashes the return state (srsdb into
@ monitor mode 0x16) and records a numeric exit reason in lr (1 = SWI/SMC,
@ 2 = instruction abort, 3 = data abort, 4 = IRQ/FIQ) before joining the
@ common world-switch path (in elided lines).
1065 srsdb sp, #0x16 @ save return state temporarily on stack
1066 mov lr, #1 @ set exit reason
1069 mon_inst_abort_entry:
1072 mov lr, #2 @ set exit reason
1075 mon_data_abort_entry:
1078 mov lr, #3 @ set exit reason
1084 mov lr, #4 @ set exit reason
1088 sub lr, lr, #4 @ adjust saved ip
1090 mov lr, #4 @ set exit reason
1093 // cps #0x12 @ switch to irq mode
1094 // adr lr, go_nonsecure_after_fiq + 4 @ set lr_irq
1095 // msr spsr, #0xd3 @ set spsr_irq
1098 /**********************************************************************
1099 * Go to secure world
@ Enter the secure world: save the full non-secure state, then restore the
@ secure state that was parked on the monitor stack.
1103 SAVE_NONSECURE_STATE 16
1104 RESTORE_SECURE_STATE
1110 /**********************************************************************
1111 * Go to nonsecure world
1113 * When the guest was interrupted by an FIQ, we don't need to save
1114 * secure state again, because it is still on top of the stack.
@ Enter the non-secure world: reload the saved non-secure state, flip the
@ SCR NS bit, then set up spsr_mon/lr_mon for the exception return into
@ the guest. The commented-out block is a disabled after-FIQ variant.
1117 //go_nonsecure_after_fiq:
1118 // mov r2, sp @ copy sp_svc to sv_mon
1122 // b go_nonsecure_after_fiq_2
1127 RESTORE_NONSECURE_STATE 16
1128 SWITCH_TO_NONSECURE_MODE
1130 // mcr p15, 0, lr, c7, c10, 4 @ drain write buffer
1131 // mcr p15, 0, lr, c8, c7, 0 @ flush TLB entry
1134 msr spsr, lr @ set spsr_mon with unsecure spsr
1135 ldr lr, [sp, #-8] @ set lr_mon with unsecure ip
1139 /* -------------------------------------- TEXT ---------------------------*/
@ Final return-to-user path (entry label in elided lines): r1/lr point at
@ a full trap frame; restore SPSR, r0-r12 and the user sp/lr, then resume
@ the interrupted instruction with ldmdb {pc}^ (copies SPSR into CPSR).
1144 add sp, r1, #RF_SIZE
1146 ldr r1, [lr, #RF(PSR, 13*4)] @ Unstack SPSR
1147 msr spsr, r1 @ Load SPSR from kernel_lr
1148 ldmia lr!, {r0 - r12}
1149 ldmia lr, {sp,lr}^ @ restore user sp and lr (now lazy)
1150 #if defined(CONFIG_ARM_V6PLUS)
1151 add lr, lr, #RF_SIZE @ Read return address
1154 add lr, lr, #(RF_SIZE - 4) @ Read return address
1155 ldmdb lr, {pc}^ @ go back to interrupted insn