3 #include "globalconfig.h"
4 #include "config_tcbsize.h"
9 /**************************************************************************
10 * Enter kernel mode (i.e. switch from any exception mode to the
11 * kernel mode and transfer the exception state).
14 .macro atomic_fixup insn do_store_pc
15 #ifndef CONFIG_ARM_V6PLUS
@ Pre-v6 cores have no ldrex/strex, so user-level atomic sequences run in
@ a special kernel-provided region. If the interrupted PC (in \insn) lies
@ in that region, it is rolled back to the start of the atomic sequence
@ so the sequence restarts atomically after the exception.
@ NOTE(review): interior lines of this macro are elided in this view --
@ the conditionals between the two compares are not fully visible.
16 @ Adjust PC if it is in the special atomic insns area
17 @ Zero-flag set after this fixup code
18 cmp \insn, #0xffffe000
20 cmp \insn, #0xfffff000
23 biceq \insn, \insn, #0x0ff        @ round PC down to sequence start
25 str \insn, [sp, #RF(PC, RF_SIZE)] @ write fixed-up PC back to the frame
28 @ ---------------------------------------------------
@ Switch from the current exception mode into SVC (kernel) mode and
@ transfer the banked exception state (SPSR/LR) into the kernel entry
@ frame on the SVC stack.
@ NOTE(review): large parts of this macro are elided in this view; the
@ PSR/PC store pairs below are alternatives chosen by elided conditionals
@ -- confirm ordering against the full source.
32 .macro __switch_to_kernel reg adjust atomic_fixup not_svc
36 #ifdef CONFIG_ARM_V6PLUS
37 #ifdef CONFIG_ARM_1136
38 // todo: do clrex with strex for CPUs without clrex
46 #if defined(CONFIG_ARM_V6PLUS)
@ 0xd3 = SVC mode (0x13) with IRQ and FIQ masked (I|F bits set)
48 msr cpsr_c, #0xd3 @cpsid f, #0x13
56 .endif @ syscall (already in svc mode)
60 str lr, [sp, #RF(PSR, -8)]
62 str lr, [sp, #RF(PC, -8)]
64 str lr, [sp, #RF(PC, -8)]
66 str lr, [sp, #RF(PSR, -8)]
@ Convenience wrapper: __switch_to_kernel with \reg fixed to r14 (lr).
70 .macro switch_to_kernel adjust atomic_fixup not_svc
71 __switch_to_kernel r14 \adjust \atomic_fixup \not_svc
74 /*************************************************************************
75 * return from an exception
@ Restore the interrupted context from the return frame and leave the
@ kernel. 'tst lr, #0x0f' tests the low mode bits of the saved PSR:
@ EQ means return to user mode, NE means return to a privileged mode,
@ in which case the kernel lr is reloaded from the frame (ldrne).
@ The final 'ldmdb ..., {pc}^' restores PC and copies SPSR->CPSR in one
@ exception-return step.
@ NOTE(review): some lines of this macro are elided in this view.
77 .macro return_from_exception
78 ldr lr, [sp, #RF(PSR,0)] @ Unstack SPSR
79 tst lr, #0x0f @ Mask all but relevant mode bits
80 add sp, sp, #RF_SIZE @ SP to top of stack
81 #if defined(CONFIG_ARM_V6PLUS)
82 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
85 msr spsr_cfsx, lr @ Load SPSR from kernel_lr
86 ldr lr, [sp, #RF(PC, -RF_SIZE)] @ copy PC on psr field for
87 str lr, [sp, #RF(PSR, -RF_SIZE)] @ final ldmdb and proper ksp
88 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
89 ldmdb sp, {pc}^ @ go back to interrupted insn
@ Reserve a stack slot for TPIDRURO (user read-only thread ID register)
@ relative to \base. NOTE(review): macro body elided in this view.
94 .macro make_tpidruro_space base
98 /***********************************************************************
99 * Enter the kernel slowtrap handler
101 * Stack the state and call 'slowtrap_entry' with sp and error code
@ Save the remaining GP registers, reserve the tpidruro slot, and enter
@ the generic slowtrap path with \errorcode as the trap error code.
103 .macro enter_slowtrap errorcode
104 stmdb sp!, {r0 - r12}
105 make_tpidruro_space sp
106 enter_slowtrap_w_stack \errorcode
112 * after SWITCH_TO_SVC !!!!
116 * | lr' | (pc after syscall)
124 * sp -> | sp^ | (user sp)
131 /*************************************************************************
132 * Generate stack for exception entries
@ NOTE(review): the body of this macro is elided in this view.
134 .macro exceptionframe
139 /***************************************************************************
140 * Generate stack for system call entries
144 * after SWITCH_TO_SVC !!!!
148 * | lr^ | (pc after syscall)
156 * sp -> | sp^ | (user sp)
161 * lr: must contain fault addr (from switch_to_kernel)
@ Syscall dispatch: per the header comment above, the faulting PC doubles
@ as the syscall number (vectors relocated to the top of the address
@ space). 'cmn lr, #0x2a' compares lr against -0x2a unsigned; only PCs in
@ the last few bytes of the address space pass the range check -- anything
@ lower-or-same branches to \no_sys_call.
163 .macro enter_sys_call no_sys_call
164 ldr lr, [sp, #RF(PC, 0)]
165 cmn lr, #0x2a @ Range Check !!! UNSIGNED !!!
166 bls \no_sys_call @ no syscall
170 add lr, sp, #RF(PC, 0)
173 stmdb sp!, {r0 - r12} @ save remaining GP registers
175 RESET_THREAD_CANCEL_AT r1 @ sets r0 to state
178 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read exception PC from stack (km_lr)
179 adr r1, sys_call_table
@ Fast return path after an IRQ: restore user GP registers and leave.
@ NOTE(review): lines between the global label and '2:' are elided.
183 .global fast_ret_from_irq
185 2: ldmia sp, {r0 - r12}^ @ restore user GP registers (user bank)
186 msr cpsr_c, #0xd3 // disable IRQs
191 ldr lr, [sp, #RF(PSR,0)]
193 @ ldmia sp, {sp,lr}^ @ done lazy
195 ldr lr, [sp, #RF(PC, -RF_SIZE)]
199 /**************************************************************************
200 * The Exception vector table.
@ ARM exception vector table: one branch per hardware vector, in
@ architectural order (UNDEF, SWI, IABORT, DABORT, -, IRQ, FIQ).
@ Two layouts follow: the normal one, and (under CONFIG_ARM_EM_TZ) an
@ indirect one that routes through fiq_b_vector trampolines.
203 .globl exception_vector
205 #ifndef CONFIG_ARM_EM_TZ
207 b undef_entry /* UNDEF */
208 b swi_entry /* SWI */
209 b inst_abort_entry /* IABORT */
210 b data_abort_entry /* DABORT */
212 b irq_entry /* IRQ */
213 b fiq_entry /* FIQ */
@ TrustZone variant: load pc from fiq_b_vector trampoline slots (8 bytes
@ apart) instead of direct branches.
216 adr pc, (fiq_b_vector + 8) /* UNDEF */
217 adr pc, (fiq_b_vector + 16) /* SWI */
218 adr pc, (fiq_b_vector + 24) /* IABORT */
219 adr pc, (fiq_b_vector + 32) /* DABORT */
221 b irq_entry /* IRQ */
222 b fiq_entry /* FIQ */
228 b undef_entry /* UNDEF */
230 b swi_entry /* SWI */
232 b inst_abort_entry /* IABORT */
234 b data_abort_entry /* DABORT */
237 /* locations to pass lr and spsr from one mode to the other
238 these are globally shared !!! */
239 .section .excp.text,"xa"
240 #if !defined(CONFIG_ARM_V6PLUS)
245 /***************************************************************************
247 ** Exception entry points.
251 /***************************************************************************
254 * Exception is an undefined instruction.
@ Undefined-instruction entry (label elided in this view): enter the
@ kernel and raise a slowtrap with error code 0x00100000.
258 switch_to_kernel 0 0 1
260 enter_slowtrap 0x00100000
262 /**************************************************************************
265 * Exception is a software interrupt (typically a syscall in normal
@ SWI entry: try the syscall path first; a non-syscall SWI falls through
@ to the slowtrap with error code 0x00200000.
270 switch_to_kernel 0 0 0
272 enter_sys_call no_sys_call
274 enter_slowtrap 0x00200000
278 /***************************************************************************
279 * Exception inst_abort ()
281 * Exception is a prefetch (instruction) abort. This exception is also
282 * used for L4 syscalls. If the exception address is in the range 0x00
283 * to 0x24 (in the exception vector page), this is interpreted as a
284 * syscall number. Some platforms allow the exception vector to be
285 * relocated to the beginning of the last 64K of memory. For these
286 * platforms, we use a negative (i.e. end of address space) value to
287 * indicate the syscall number. If exception is not within the syscall
288 * range, generate a pager IPC (or panic if within the kernel).
@ Instruction-abort entry (label elided in this view); adjust = 4.
293 switch_to_kernel 4 0 1
296 /**************************************************************************/
297 prefetch_abort: @ A real prefetch abort occurred --- handled as a page fault
299 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
300 ldr lr, [sp, #RF(PSR, 5*4)] @ get spsr from stack
301 ands lr, lr, #0x0f @ Mask all but relevant mode bits
302 bne kernel_prefetch_abort @ Kernel abort?
303 /* user prefetch abort */
304 mrc p15, 0, r1, c5, c0, 1 @ Load IFSR into r1
305 bic r1, r1, #0x00ff0000 @ clear error-code field in r1
306 orr r1, r1, #0x00330000 @ Set read bit and prefetch abort
307 #if defined(CONFIG_ARM_V6PLUS) && !defined(CONFIG_ARM_1136) && !defined(CONFIG_ARM_MPCORE)
308 mrc p15, 0, r0, c6, c0, 2 @ Read fault address, for T2: pfa != pc
310 ldr r0, [sp, #RF(PC, 5*4)] @ Get PC from RF and use as pfa
@ Call pagefault_entry(pfa=r0, error=r1, ...) via the literal pool;
@ returns to pagefault_return.
315 adr lr, pagefault_return
316 ldr pc, .LCpagefault_entry @ Jump to C code
318 kernel_prefetch_abort: @ Kernel generated IAbort
319 @ Should not get IAborts in kernel
321 adr r0, kernel_prefetch_abort_label
328 /****************************************************************************
329 * Exception data_abort ()
331 * Exception is a data abort. If exception happened in user mode,
332 * generate pager IPC. If exception happened in kernel mode, it is
333 * probably due to a non-mapped TCB (or else we panic).
@ Test whether the faulting instruction (in r3) is an ldrd (doubleword
@ load) and branch to \jmp_to_if_ldrd if so. NOTE(review): most of the
@ macro body is elided in this view; only the opcode-mask step is visible.
362 .macro check_ldrd_insn jmp_to_if_ldrd
365 and r12, r3, #0x000000f0
@ Data-abort entry (label elided in this view); adjust = 8. Builds the
@ pagefault_entry argument set: r0 = fault address, r1 = fault status /
@ error code, r2 = faulting PC; then tail-jumps to the C handler with
@ lr = pagefault_return.
374 switch_to_kernel 8 0 1
377 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
379 /* user data abort */
380 #ifdef CONFIG_ARM_V6PLUS
381 mrc p15, 0, r1, c5, c0, 0 @ Load DFSR into r1
382 bic r1, r1, #0x00ff0000 @ clear error-code field
383 mrc p15, 0, r0, c6, c0, 0 @ Load DFAR into r0
385 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
386 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
388 ands lr, lr, #0x0f @ Mask all but relevant mode bits
391 orreq r1, r1, #0x00010000 @ user-mode fault flag (EQ = from user)
392 orr r1, r1, #0x00400000 @ Set error code to data abort
395 adr lr, pagefault_return @ set return address
397 ldr pc, .LCpagefault_entry @ page fault
@ Pre-v6 path: the FSR has no reliable write indicator, so the faulting
@ instruction itself is decoded (ldrd check, write-bit reconstruction).
399 mrc p15, 0, r1, c5, c0, 0 @ Load FSR into r1
400 bic r1, r1, #(1 << 11) @ clear bit 11 (write indicator)
401 bic r1, r1, #0x00ff0000 @ clear error-code field
402 mrc p15, 0, r0, c6, c0, 0 @ Load FAR into r0
403 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
404 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
405 tst lr, #0x20 @ comes from thumb mode?
408 ldr r3, [r2] @ Load faulting insn
409 check_ldrd_insn .LCwas_ldrd
411 orreq r1, r1, #(1 << 11) @ Set FSR write bit
415 ands lr, lr, #0x0f @ Mask all but relevant mode bits
418 orreq r1, r1, #0x00010000 @ user-mode fault flag (EQ = from user)
419 orr r1, r1, #0x00400000 @ Set error code to data abort
421 adr lr, pagefault_return @ set return address
423 ldr pc, .LCpagefault_entry @ page fault
430 beq .LCret_handle_thumb
432 orreq r1, r1, #(1 << 11) @ Set FSR write bit
433 b .LCret_handle_thumb
@ Literal pool: addresses of the C entry points.
436 .LCpagefault_entry: .word pagefault_entry
437 .LCslowtrap_entry: .word slowtrap_entry
440 /***************************************************************************
441 * Generic return code for restoring the thread state after exceptions.
461 * old sp -> | r0 | +0
@ Return path after pagefault_entry (entry label elided in this view).
@ Presumably the C handler's result was just tested: EQ (unresolved
@ fault) escalates to the slowtrap; NE (resolved) restores user state
@ and returns -- TODO confirm against the full source.
470 beq slowtrap_from_pagefault
472 msrne cpsr_c, #0xd3 // disable IRQs
473 ldmneia sp!, {r0 - r3, r12} @ Restore user state
474 return_from_exception
476 slowtrap_from_pagefault:
477 msr cpsr_c, #0xd3 // disable IRQs
479 stmdb sp!, {r0 - r11} @ complete the trap frame
480 make_tpidruro_space sp
483 adr lr, exception_return @ return through exception_return
484 ldr pc, .LCslowtrap_entry @ slow trap
@ Generic exception return: drop the pfa/err/tpidruro words, restore all
@ GP registers and leave the kernel.
486 .global __return_from_exception
487 __return_from_exception:
489 msr cpsr_c, #0xd3 // disable IRQs
490 add sp, sp, #12 // pfa, err & tpidruro
491 ldmia sp!, {r0 - r12}
492 return_from_exception
497 return_from_exception
500 /***************************************************************************
503 * Exception is an interrupt. Generate interrupt IPC.
@ IRQ entry (label elided in this view): enter kernel (adjust = 4, with
@ atomic fixup), save scratch registers, handle the interrupt (handler
@ call elided), then restore and return.
507 switch_to_kernel 4 1 1
510 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
511 @ add r0, sp, #(5*4) @ debug
514 ldmia sp, {r0 - r3, r12} @ Restore user state
515 msr cpsr_c, #0xd3 // disable IRQs
517 return_from_exception
521 /******************************************************************************
524 * Exception is a fast interrupt.
@ FIQ handling. Under CONFIG_ARM_EM_TZ the interrupted PC must be
@ adjusted via lookup tables (.pc_adjust_map_arm / .pc_adjust_map_thumb,
@ selected by the saved Thumb bit) because entry came through the
@ fiq_b_vector trampolines. NOTE(review): this region is heavily elided
@ in this view -- the table contents and most of the adjustment logic
@ are not visible; confirm against the full source.
528 #ifdef CONFIG_ARM_EM_TZ
535 .pc_adjust_map_thumb:
544 #ifdef CONFIG_ARM_EM_TZ
559 mov r0, r10 // restore
567 movw r11, #(fiq_b_vector - exception_vector)
575 adreq r0, .pc_adjust_map_arm
576 adrne r0, .pc_adjust_map_thumb
@ Non-TZ FIQ entry body: same shape as irq_entry.
595 switch_to_kernel 4 1 1
598 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
601 ldmia sp, {r0 - r3, r12} @ Restore user state
602 msr cpsr_c, #0xd3 // disable IRQs
604 return_from_exception
608 /**************************************************************************/
609 /* The alien stuff is below */
610 /**************************************************************************/
@ Alien (externally supervised) thread syscall handling: instead of
@ executing the syscall directly, raise a trap before it (error code
@ 0x00300000) and, if resumed, another trap after it (0x00310000, the
@ extra 0x00010000 bit marking the "after" trap).
611 alien_syscall: @ Do it for an alien ---------------------------------------
613 bicne r0, r0, #0x20000
615 @ Trap alien before system call -----------------------------------
616 @ The trap is an insn abort on the syscall address in the kernel.
617 ldr lr, [sp, #RF(PC, 13*4)]
618 str lr, [sp, #RF(USR_LR, 13*4)]
619 ldr lr, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
620 sub lr, lr, #4 @ adjust pc to be on insn
621 str lr, [sp, #RF(PC, 13*4)] @ store to entry_stack_PC
622 enter_slowtrap_w_stack 0x00300000
623 @ Never reach this -- end up in user land after exception reply
625 1: @ Resume the alien system call ------------------------------------
626 str r0, [r1, #(OFS__THREAD__STATE)]
627 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
628 adr r1, sys_call_table
632 2: nop @ The return point after the resumed alien system call --------
633 msr cpsr_c, #0xd3 // disable IRQs
634 @ Trap after the resumed alien system call ------------------------
635 @ The trap occurs at the insn where the system call returns to.
636 @ Set the bit 0x00010000 to indicate a trap after the resumed
638 enter_slowtrap_w_stack 0x00310000
641 /*****************************************************************************/
641 /*****************************************************************************/
642 /* The syscall table stuff */
643 /*****************************************************************************/
@ Instantiate generated kernel-exit / vCPU-upcall code. The LOAD_USR_*
@ helper macros supply the vCPU entry stack pointer and the vCPU state
@ pointer to the generator. NOTE(review): macro bodies partially elided.
645 GEN_LEAVE_BY_TRIGGER_EXCEPTION
@ r0 := &vcpu_state->entry_sp
647 .macro LOAD_USR_SP vcpu_ptr
648 add r0, \vcpu_ptr, #(OFS__VCPU_STATE__ENTRY_SP)
652 .macro LOAD_USR_VCPU reg, kvcpu, thread
656 GEN_VCPU_UPCALL OFS__THREAD__USER_VCPU, LOAD_USR_SP, LOAD_USR_VCPU
@ Panic/diagnostic message strings referenced by the handlers above.
658 kernel_prefetch_abort_label: .string "Kernel prefetch abort"
659 missed_excpt_ret_label: .string "ERROR in exception return"
660 fiq_label: .string "FIQ entry"
662 /**********************************************************************
664 **********************************************************************/
668 #ifdef CONFIG_ARM_EM_TZ
674 mcr p15, 0, \reg, c7, c5, 4 @ cp15isb
678 /**********************************************************************
679 * Secure and Nonsecure switching stuff
681 *********************************************************************/
@ TrustZone: save the full non-secure (guest) world state -- GP regs,
@ banked regs, return pc/cpsr, pending virtual interrupts, CPACR and
@ VFP state -- while toggling between secure and non-secure views via
@ the SWITCH_TO_*_MODE macros (SCR writes). Also captures DFSR/DFAR so
@ guest data aborts can be reported. NOTE(review): many interior lines
@ are elided in this view; register roles between the visible stores
@ are not fully verifiable here.
682 .macro SAVE_NONSECURE_STATE off
684 // save exit reason temporarily on stack
687 // switch to secure world
688 SWITCH_TO_SECURE_MODE lr
693 stmia lr!, {r0 - r12}
709 stmia r0!, {r8 - r12, sp, lr}
733 // copy return pc/cpsr from stack
738 // save pending virtual interrupt state
739 mrc p15, 0, r1, c12, c1, 0
742 mrc p15, 0, r1, c1, c0, 2 @ cpacr
746 mov r1, #0x500000 @ enable CP10/11
747 mcr p15, 0, r1, c1, c0, 2 @ cpacr
750 // switch to non-secure world
751 SWITCH_TO_NONSECURE_MODE r1
754 mrc p10, 7, r1, cr8, cr0, 0 @ fpexc
760 // switch to secure world
761 SWITCH_TO_SECURE_MODE r1
763 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
766 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
769 // copy the exit reason from stack
@ TrustZone: mirror of SAVE_NONSECURE_STATE -- reload the guest's banked
@ registers, return pc/cpsr, CPACR and VFP state, switch the SCR to
@ non-secure, and finally restore the GP registers. NOTE(review): many
@ interior lines are elided in this view.
774 .macro RESTORE_AND_SWITCH_TO_NONSECURE_STATE off
778 // jump over general purpose register
793 ldmia r0!, {r8 - r12, sp, lr}
817 // copy return pc/cpsr on stack
821 // skip pending events field
825 mrc p15, 0, r1, c1, c0, 2 @ cpacr
827 // switch to non-secure world
828 SWITCH_TO_NONSECURE_MODE r1
832 mcr p10, 7, r1, cr8, cr0, 0 @ fpexc
839 ldmia lr!, {r0 - r12}
842 /**********************************************************************
843 * Save secure state on top of the stack.
845 * We save also the user-level registers here, because we need to
846 * restore some on FIQ.
@ TrustZone: stack the secure-world state (CPACR, supervisor and
@ user-level return values) on top of the monitor stack.
849 .macro SAVE_SECURE_STATE
851 mrc p15, 0, r5, c1, c0, 2 @ read CPACR
853 stmdb sp!, {r3, r4} @ save supervisor return values
854 stmdb sp, {sp, lr}^ @ save user-level return values
858 /**********************************************************************
859 * Restore secure state when guest returns with monitor call.
861 * This removes the secure state from the top of the stack.
863 .macro RESTORE_SECURE_STATE
865 mov r0, sp @ restore stack pointer from supervisor mode
869 ldmia sp, {sp, lr}^ @ restore user-level return values
871 ldmia sp!, {r3, r4} @ restore supervisor return values
873 mcr p15, 0, r0, c1, c0, 2 @ write CPACR back
@ Write \reg to the SCR (c1,c1,0) to flip the NS bit: enter the
@ non-secure world ...
876 .macro SWITCH_TO_NONSECURE_MODE reg
878 mcr p15, 0, \reg, c1, c1, 0
@ ... or the secure world. NOTE(review): the \reg setup lines of both
@ macros are elided in this view.
882 .macro SWITCH_TO_SECURE_MODE reg
884 mcr p15, 0, \reg, c1, c1, 0
889 /*****************************************************************************/
890 /* The monitor entry table stuff */
891 /*****************************************************************************/
@ TrustZone monitor-mode vector table: each entry enters secure mode via
@ mon_enter_secure with an exit-reason code (ec) and the lr adjustment
@ needed to point back at the trapping instruction (sub_lr).
893 .globl monitor_vector_base
896 b mon_undef_entry /* UNDEF */
897 b mon_swi_entry /* SWI */
898 b mon_inst_abort_entry /* IABORT */
899 b mon_data_abort_entry /* DABORT */
901 b mon_irq_entry /* IRQ */
902 b mon_fiq_entry /* FIQ */
@ NOTE(review): the body of mon_enter_secure is elided in this view.
904 .macro mon_enter_secure name, ec, sub_lr
@ Instantiations: name, exit-reason code, lr adjustment.
914 mon_enter_secure mon_undef_entry, 6, 4
915 mon_enter_secure mon_swi_entry, 1, 0
916 mon_enter_secure mon_inst_abort_entry, 2, 4
917 mon_enter_secure mon_data_abort_entry, 3, 4
918 mon_enter_secure mon_irq_entry, 4, 4
919 mon_enter_secure mon_fiq_entry, 5, 4
@ Disabled alternative FIQ resume path (kept for reference).
921 // cps #0x12 @ switch to irq mode
922 // adr lr, go_nonsecure_after_fiq + 4 @ set lr_irq
923 // msr spsr, #0xd3 @ set spsr_irq
926 /**********************************************************************
@ World-switch entry/exit (labels elided in this view): save the guest
@ state on entry to the secure world; on the way out, restore the guest
@ state and set spsr_mon/lr_mon so the final exception return lands in
@ the non-secure world. NOTE(review): heavily elided -- confirm the
@ surrounding control flow against the full source.
931 SAVE_NONSECURE_STATE 20
938 /**********************************************************************
939 * Go to nonsecure world
941 * When the guest was interrupted by an FIQ, we don't need to save
942 * secure state again, because it is still on top of the stack.
945 //go_nonsecure_after_fiq:
946 // mov r2, sp @ copy sp_svc to sv_mon
950 // b go_nonsecure_after_fiq_2
955 RESTORE_AND_SWITCH_TO_NONSECURE_STATE 20
958 msr spsr, lr @ set spsr_mon with unsecure spsr
959 ldr lr, [sp, #-8] @ set lr_mon with unsecure ip
961 #endif /* TRUSTZONE */
963 /* -------------------------------------- TEXT ---------------------------*/
@ Final kernel-exit path (entry label elided in this view): r0 points at
@ a saved register frame; skip the pfa/err/tpidruro words, restore user
@ registers and perform the exception return via 'ldmdb ..., {pc}^'
@ (restores PC and copies SPSR->CPSR).
969 add lr, r0, #12 @ pfa, err + tpidruro
970 #if !defined(CONFIG_ARM_V6PLUS)
971 ldr r1, [lr, #RF(PSR, 13*4)] @ Unstack SPSR
972 msr spsr, r1 @ Load SPSR from kernel_lr
974 ldmia lr!, {r0 - r12}
975 ldmia lr, {sp,lr}^ @ restore user sp and lr (now lazy)
976 #if defined(CONFIG_ARM_V6PLUS)
977 add lr, lr, #RF_SIZE @ Read return address
980 add lr, lr, #(RF_SIZE - 4) @ Read return address
981 ldmdb lr, {pc}^ @ go back to interrupted insn