3 #include "globalconfig.h"
4 #include "config_tcbsize.h"
9 /**********************************************************************
10 * calculate the TCB address from a stack pointer
12 .macro CONTEXT_OF reg, ptr
@ Round \ptr down to the start of its thread block (TCB lives at the
@ base of the kernel stack). Two BICs are needed because an ARM data-
@ processing immediate is an 8-bit value rotated, so the mask
@ (THREAD_BLOCK_SIZE-1) is split into two byte-sized chunks.
@ NOTE(review): listing is sampled; the closing .endm is not shown here.
13 bic \reg, \ptr, #((THREAD_BLOCK_SIZE-1) & 0xff)
14 bic \reg, \reg, #((THREAD_BLOCK_SIZE-1) & 0xff00)
17 /**********************************************************************
18 * Reset the thread cancel flag.
19 * Register r0 is scratched and contains the thread state afterwards
21 .macro RESET_THREAD_CANCEL_AT tcb
@ Load thread state word, clear the cancel bit, store it back.
@ NOTE(review): the BIC that actually clears the cancel flag (original
@ line 23) is elided in this sampled listing — only load/store visible.
22 ldr r0, [\tcb, #(OFS__THREAD__STATE)]
24 str r0, [\tcb, #(OFS__THREAD__STATE)]
27 /****************************
28 * some handy definitions
@ RF(reg, offs): byte offset of saved register `reg` inside a return
@ frame that starts `offs` bytes above the current stack pointer.
@ The RF_<reg> offsets themselves are defined in elided lines.
36 #define RF(reg, offs) (RF_##reg + (offs))
38 /**************************************************************************
39 * Enter kernel mode (i.e. switch from any exception mode to the
40 * kernel mode and transfer the exception state).
43 .macro atomic_fixup insn do_store_pc
44 #ifndef CONFIG_ARM_V6PLUS
@ Pre-v6 ARM has no ldrex/strex; atomic user sequences live in a special
@ kernel-provided page near the top of the address space. If the faulting
@ PC (\insn) lies in that region, roll it back to the start of the atomic
@ sequence so it restarts cleanly.
@ NOTE(review): several lines (orig. 48, 50-51, 53, 55-56) are elided.
45 @ Adjust PC if it is in the special atomic insns area
46 @ Zero-flag set after this fixup code
47 cmp \insn, #0xffffe000
49 cmp \insn, #0xfffff000
52 biceq \insn, \insn, #0x0ff
54 str \insn, [sp, #RF(PC, RF_SIZE)]
57 @ ---------------------------------------------------
61 .macro __switch_to_kernel reg adjust atomic_fixup not_svc
@ Transfer exception state from the entry mode into SVC (kernel) mode and
@ build the start of the return frame (PSR/PC at [sp - 8]).
@ NOTE(review): large parts of this macro body are elided in this sampled
@ listing (orig. lines 62-64, 68-74, 76, 78-84, 86-88, 90, 92, 94, 96-98).
65 #ifdef CONFIG_ARM_V6PLUS
66 #ifdef CONFIG_ARM_1136
67 // todo: do clrex with strex for CPUs without clrex
75 #if defined(CONFIG_ARM_V6PLUS)
@ 0xd3 = SVC mode (0x13) with IRQ+FIQ masked.
77 msr cpsr_c, #0xd3 @cpsid f, #0x13
85 .endif @ syscall (already in svc mode)
89 str lr, [sp, #RF(PSR, -8)]
91 str lr, [sp, #RF(PC, -8)]
93 str lr, [sp, #RF(PC, -8)]
95 str lr, [sp, #RF(PSR, -8)]
@ Convenience wrapper: always uses r14 as the scratch/transfer register.
99 .macro switch_to_kernel adjust atomic_fixup not_svc
100 __switch_to_kernel r14 \adjust \atomic_fixup \not_svc
103 /*************************************************************************
104 * return from an exception
@ Restores SPSR and jumps back to the interrupted instruction.
@ The `tst lr, #0x0f` distinguishes a return to user mode (mode bits all
@ zero -> Z set) from a return to a kernel/privileged mode (Z clear); the
@ conditional ldrne picks up the saved kernel lr only in the latter case.
@ NOTE(review): orig. lines 105, 112-113, 119-123 elided.
106 .macro return_from_exception
107 ldr lr, [sp, #RF(PSR,0)] @ Unstack SPSR
108 tst lr, #0x0f @ Mask all but relevant mode bits
109 add sp, sp, #RF_SIZE @ SP to top of stack
110 #if defined(CONFIG_ARM_V6PLUS)
111 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
114 msr spsr_cfsx, lr @ Load SPSR from kernel_lr
115 ldr lr, [sp, #RF(PC, -RF_SIZE)] @ copy PC on psr field for
116 str lr, [sp, #RF(PSR, -RF_SIZE)] @ final ldmdb and proper ksp
117 ldrne lr, [sp, #RF(SVC_LR, -RF_SIZE)] @ load old kernel lr
118 ldmdb sp, {pc}^ @ go back to interrupted insn
124 /***********************************************************************
125 * Enter the kernel slowtrap handler
127 * Stack the state and call 'slowtrap_entry' with sp and error code
129 .macro enter_slowtrap_w_stack errorcode
@ Tail-call slowtrap_entry via the literal pool; lr is preloaded with
@ exception_return so the C handler "returns" into the exit path.
@ NOTE(review): orig. lines 130-132 (argument setup) elided.
133 adr lr, exception_return
134 ldr pc, .LCslowtrap_entry
@ Variant that first saves r0-r12 on the stack, then enters the slowtrap.
137 .macro enter_slowtrap errorcode
138 stmdb sp!, {r0 - r12}
139 enter_slowtrap_w_stack \errorcode
145 * after SWITCH_TO_SVC !!!!
149 * | lr' | (pc after syscall)
157 * sp -> | sp^ | (user sp)
164 /*************************************************************************
166 * Generate stack for exception entries
167 * - Adjust return address
168 * - Store return address at [sp + 8]
169 * - Store spsr at [sp + 4]
171 * - Store user sp at [sp]
172 * - Store user lr at [sp + 4]
174 .macro exceptionframe
@ User sp/lr are no longer stored eagerly here; the store is done lazily
@ elsewhere (see the "done lazy" comments in the return paths).
176 @ stmia sp, {sp,lr}^ @ now done lazy
180 /***************************************************************************
181 * Generate stack for system call entries
185 * after SWITCH_TO_SVC !!!!
189 * | lr^ | (pc after syscall)
197 * sp -> | sp^ | (user sp)
202 * lr: must contain fault addr (from switch_to_kernel)
@ NOTE(review): the `.macro syscallframe` header itself is in elided
@ lines; only part of the body is visible below. lr is pointed at the
@ saved-PC slot of the frame being built.
205 add lr, sp, #RF(PC, -8)
208 @ stmia sp, {sp}^ @ now done lazy
211 .macro enter_sys_call no_sys_call
@ Decide whether the faulting PC encodes an L4 syscall number.
@ cmn lr, #0x2a compares lr against -0x2a, i.e. addresses in the last
@ 0x2a bytes of the address space (relocated vector page); unsigned
@ `bls` rejects everything outside that syscall range.
212 ldr lr, [sp, #RF(PC, -8)]
213 cmn lr, #0x2a @ Range Check !!! UNSIGNED !!!
214 bls \no_sys_call @ no syscall
@ NOTE(review): orig. lines 215-217, 219, 221-222, 225+ elided.
218 stmdb sp!, {r0 - r12}
220 RESET_THREAD_CANCEL_AT r1 @ sets r0 to state
223 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read exception PC from stack (km_lr)
224 adr r1, sys_call_table
@ Fast IRQ-return path: restore user r0-r12, mask IRQs (0xd3 = SVC mode,
@ IRQ+FIQ off), then pick up saved PSR/PC for the final exception return.
@ NOTE(review): orig. lines 229, 232-234, 236, 238, 240+ elided.
228 .global fast_ret_from_irq
230 2: ldmia sp, {r0 - r12}^
231 msr cpsr_c, #0xd3 // disable IRQs
235 ldr lr, [sp, #RF(PSR,0)]
237 @ ldmia sp, {sp,lr}^ @ done lazy
239 ldr lr, [sp, #RF(PC, -RF_SIZE)]
244 /**************************************************************************
245 * The Exception vector table.
@ Standard ARM vector layout: reset (elided), UNDEF, SWI, prefetch abort,
@ data abort, (reserved, elided), IRQ, FIQ — one branch per vector slot.
248 .globl exception_vector
251 b undef_entry /* UNDEF */
252 b swi_entry /* SWI */
253 b inst_abort_entry /* IABORT */
254 b data_abort_entry /* DABORT */
256 b irq_entry /* IRQ */
257 b fiq_entry /* FIQ */
260 /* locations to pass lr and spsr from one mode to the other
261 these are globally shared !!! */
262 .section .excp.text,"xa"
266 /***************************************************************************
268 ** Exception entry points.
272 /***************************************************************************
275 * Exception is an undefined instruction.
@ UNDEF: enter kernel mode, then raise slowtrap with error 0x00100000.
279 switch_to_kernel 0 0 1
281 enter_slowtrap 0x00100000
283 /**************************************************************************
286 * Exception is a software interrupt (typically a syscall in normal
@ SWI: try the syscall fast path; otherwise slowtrap with 0x00200000.
291 switch_to_kernel 0 0 0
292 enter_sys_call no_sys_call
295 enter_slowtrap 0x00200000
299 /***************************************************************************
300 * Exception inst_abort ()
302 * Exception is a prefetch (instruction) abort. This exception is also
303 * used for L4 syscalls. If the exception address is in the range 0x00
304 * to 0x24 (in the exception vector page), this is interpreted as a
305 * syscall number. Some platforms allow the exception vector to be
306 * relocated to the beginning of the last 64K of memory. For these
307 * platforms, we use a negative (i.e. end of address space) value to
308 * indicate the syscall number. If exception is not within the syscall
309 * range, generate a pager IPC (or panic if within the kernel).
@ adjust=4: prefetch-abort lr points one insn past the faulting address.
314 switch_to_kernel 4 0 1
317 /**************************************************************************/
318 prefetch_abort: @ A real prefetch abort occured --- handled as a page fault
320 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
@ Mode bits of the saved SPSR decide user vs. kernel abort.
321 ldr lr, [sp, #RF(PSR, 5*4)] @ get spsr from stack
322 ands lr, lr, #0x0f @ Mask all but relevant mode bits
323 bne kernel_prefetch_abort @ Kernel abort?
324 /* user prefetch abort */
@ Build the fault status word for the pager: IFSR plus 0x0033'0000
@ (read access + prefetch-abort marker) in the upper half.
325 mrc p15, 0, r1, c5, c0, 1 @ Load IFSR into r1
326 bic r1, r1, #0x00ff0000
327 orr r1, r1, #0x00330000 @ Set read bit and prefetch abort
328 #if defined(CONFIG_ARM_V6PLUS) && !defined(CONFIG_ARM_1136)
329 mrc p15, 0, r0, c6, c0, 2 @ Read fault address, for T2: pfa != pc
331 ldr r0, [sp, #RF(PC, 5*4)] @ Get PC from RF and use as pfa
336 adr lr, pagefault_return
337 ldr pc, .LCpagefault_entry @ Jump to C code
339 kernel_prefetch_abort: @ Kernel generated IAbort
340 @ Should not get IAborts in kernel
342 adr r0, kernel_prefetch_abort_label
349 /****************************************************************************
350 * Exception data_abort ()
352 * Exception is a data abort. If exception happened in user mode,
353 * generate pager IPC. If exception happened in kernel mode, it is
354 * probably due to a non-mapped TCB (or else we panic).
@ Helper: detect an LDRD/STRD encoding in the faulting instruction
@ (bits [7:4] pattern) — presumably needed to fix up the FSR write bit
@ on pre-v6 cores. NOTE(review): rest of the check is in elided lines.
383 .macro check_ldrd_insn jmp_to_if_ldrd
386 and r12, r3, #0x000000f0
@ adjust=8: data-abort lr points two insns past the faulting address.
395 switch_to_kernel 8 0 1
398 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
400 /* user data abort */
401 #ifdef CONFIG_ARM_V6PLUS
@ v6+: DFSR carries the write/read bit directly — no insn decoding needed.
402 mrc p15, 0, r1, c5, c0, 0 @ Load DFSR into r1
403 bic r1, r1, #0x00ff0000
404 mrc p15, 0, r0, c6, c0, 0 @ Load DFAR into r0
406 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
407 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
409 ands lr, lr, #0x0f @ Mask all but relevant mode bits
412 orreq r1, r1, #0x00010000
413 orr r1, r1, #0x00400000 @ Set error code to data abort
416 adr lr, pagefault_return @ set return address
418 ldr pc, .LCpagefault_entry @ page fault
@ Pre-v6: FSR has no reliable write bit; decode the faulting instruction
@ (bit 20 / LDRD check) to reconstruct it.
420 mrc p15, 0, r1, c5, c0, 0 @ Load FSR into r1
421 bic r1, r1, #(1 << 11) @ clear bit 11 (write indicator)
422 bic r1, r1, #0x00ff0000
423 mrc p15, 0, r0, c6, c0, 0 @ Load FAR into r0
424 ldr r2, [sp, #RF(PC, 5*4)] @ Load PC into r2
425 ldr lr, [sp, #RF(PSR, 5*4)] @ load spsr, from stack
426 tst lr, #0x20 @ comes from thumb mode?
429 ldr r3, [r2] @ Load faulting insn
430 check_ldrd_insn .LCwas_ldrd
432 orreq r1, r1, #(1 << 11) @ Set FSR write bit
436 ands lr, lr, #0x0f @ Mask all but relevant mode bits
439 orreq r1, r1, #0x00010000
440 orr r1, r1, #0x00400000 @ Set error code to data abort
442 adr lr, pagefault_return @ set return address
444 ldr pc, .LCpagefault_entry @ page fault
451 beq .LCret_handle_thumb
453 orreq r1, r1, #(1 << 11) @ Set FSR write bit
454 b .LCret_handle_thumb
@ Literal pool: addresses of the C entry points.
457 .LCpagefault_entry: .word pagefault_entry
458 .LCslowtrap_entry: .word slowtrap_entry
461 /***************************************************************************
462 * Generic return code for restoring the thread state after exceptions.
482 * old sp -> | r0 | +0
@ pagefault_entry's return value decides: zero -> escalate to slowtrap,
@ nonzero -> restore user state and return from the exception.
491 beq slowtrap_from_pagefault
493 msrne cpsr_c, #0xd3 // disable IRQs
494 ldmneia sp!, {r0 - r3, r12} @ Restore user state
495 return_from_exception
497 slowtrap_from_pagefault:
498 msr cpsr_c, #0xd3 // disable IRQs
500 stmdb sp!, {r0 - r11}
503 adr lr, exception_return
504 ldr pc, .LCslowtrap_entry @ slow trap
@ Common exit point after a C-level trap handler returns.
506 .global __return_from_exception
507 __return_from_exception:
509 msr cpsr_c, #0xd3 // disable IRQs
511 ldmia sp!, {r0 - r12}
512 return_from_exception
517 return_from_exception
520 /***************************************************************************
523 * Exception is an interrupt. Generate interrupt IPC.
@ IRQ: adjust=4, atomic_fixup enabled (second arg 1) — an interrupted
@ user atomic sequence must be rolled back before handling.
527 switch_to_kernel 4 1 1
530 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
531 @ add r0, sp, #(5*4) @ debug
534 ldmia sp, {r0 - r3, r12} @ Restore user state
535 msr cpsr_c, #0xd3 // disable IRQs
537 return_from_exception
543 /******************************************************************************
546 * Exception is a fast interrupt.
@ FIQ: same entry/exit shape as IRQ.
550 switch_to_kernel 4 1 1
553 stmdb sp!, {r0 - r3, r12} @ Stack rest of user state
554 @ add r0, sp, #(5*4) @ debug
557 ldmia sp, {r0 - r3, r12} @ Restore user state
558 msr cpsr_c, #0xd3 // disable IRQs
560 return_from_exception
566 /**************************************************************************/
567 /* The alien stuff is below */
568 /**************************************************************************/
569 alien_syscall: @ Do it for an alien ---------------------------------------
571 bicne r0, r0, #0x20000
573 @ Trap alien before system call -----------------------------------
574 @ The trap is an insn abort on the syscall address in the kernel.
@ Rearrange the frame so the exception reply sees the syscall insn
@ address as PC: user lr slot gets the post-syscall PC, PC slot gets
@ the original exception PC minus 4.
575 ldr lr, [sp, #RF(PC, 13*4)]
576 str lr, [sp, #RF(USR_LR, 13*4)]
577 ldr lr, [sp, #RF(SVC_LR, 13*4)] @ read orig exception PC
578 sub lr, lr, #4 @ adjust pc to be on insn
579 str lr, [sp, #RF(PC, 13*4)] @ store to entry_stack_PC
580 enter_slowtrap_w_stack 0x00300000
581 @ Never reach this -- end up in user land after exception reply
583 1: @ Resume the alien system call ------------------------------------
584 str r0, [r1, #(OFS__THREAD__STATE)]
585 ldr r0, [sp, #RF(SVC_LR, 13*4)] @ read orig excpetion PC
586 adr r1, sys_call_table
590 2: nop @ The return point after the resumed alien system call --------
591 msr cpsr_c, #0xd3 // disable IRQs
592 @ Trap after the resumed alien system call ------------------------
593 @ The trap occurs at the insn where the system call returns to.
594 @ Set the bit 0x00010000 to indicate a trap after the resumed
596 enter_slowtrap_w_stack 0x00310000
599 /*****************************************************************************/
600 /* The syscall table stuff */
601 /*****************************************************************************/
@ Each table slot is the address of a C wrapper sys_<name>_wrapper.
602 #define SYSCALL(name) .word sys_##name##_wrapper
604 .globl sys_call_table
609 .word sys_ipc_wrapper
611 SYSCALL(invoke_debug)
620 .global leave_by_trigger_exception
622 leave_by_trigger_exception:
@ Re-open the already-popped return frame, overwrite its PC/PSR with the
@ thread's saved exception IP/PSR, then raise an artificial exception
@ (slowtrap 0x00500000) on behalf of the thread.
623 sub sp, sp, #RF_SIZE @ restore old return frame
624 stmdb sp!, {r0 - r12}
626 /* restore original IP */
628 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
629 str r0, [sp, #RF(PC, 13*4)]
631 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_PSR)]
632 str r0, [sp, #RF(PSR, 13*4)]
@ Invalidate the stored exception IP (value of r0 at this point comes
@ from elided lines — presumably a sentinel; verify against full source).
635 str r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
637 enter_slowtrap_w_stack 0x00500000
640 .global leave_by_vcpu_upcall;
642 leave_by_vcpu_upcall:
643 sub sp, sp, #RF_SIZE @ restore old return frame
646 /* restore original IP */
649 /* access_vcpu() for the local case */
@ r2 -> return-frame area inside the user-visible vCPU state page.
650 ldr r2, [r1, #(OFS__THREAD__USER_VCPU)]
651 add r2, r2, #(VAL__SIZEOF_TRAP_STATE - RF_SIZE)
@ Copy saved exception IP/PSR into the vCPU state frame.
653 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
654 str r0, [r2, #RF(PC, 0)]
656 ldr r0, [r1, #(OFS__THREAD__EXCEPTION_PSR)]
657 str r0, [r2, #RF(PSR, 0)]
658 bic r0, #0x20 // force ARM mode
659 str r0, [sp, #RF(PSR, 3*4)]
662 str r0, [r1, #(OFS__THREAD__EXCEPTION_IP)]
@ Forward user lr/sp from the kernel frame to the vCPU state frame.
664 ldr r0, [sp, #RF(USR_LR, 3*4)]
665 str r0, [r2, #RF(USR_LR, 0)]
667 ldr r0, [sp, #RF(USR_SP, 3*4)]
668 str r0, [r2, #RF(USR_SP, 0)]
@ Redirect the return frame to the vCPU entry point (entry_sp/entry_ip).
683 add r0, r2, #(-8 + OFS__VCPU_STATE__ENTRY_SP)
686 ldr r0, [r2, #(-8 + OFS__VCPU_STATE__ENTRY_IP)]
688 str r0, [sp, #RF(PC, 0)]
@ Panic/diagnostic message strings referenced by the abort paths above.
694 kernel_prefetch_abort_label: .string "Kernel prefetch abort"
695 missed_excpt_ret_label: .string "ERROR in exception return"
696 fiq_label: .string "FIQ entry"
698 /**********************************************************************
700 **********************************************************************/
702 .macro DEBUGGER_ENTRY type
@ Build a full trap frame by hand (sp/lr/pc/psr + r0-r12), call the
@ nested trap handler with error 0x00e00000 | type, then unwind the
@ frame in reverse order. NOTE(review): many lines elided.
704 str sp, [sp, #(RF(USR_SP, -RF_SIZE))] @ save r[13]
705 sub sp, sp, #(RF_SIZE)
707 str lr, [sp, #RF(SVC_LR, 0)]
708 str lr, [sp, #RF(PC, 0)]
710 str lr, [sp, #RF(PSR, 0)]
712 stmdb sp!, {r0 - r12}
714 mov r1, #0x00e00000 @ err
715 orr r1, #\type @ + type
723 add sp, sp, #8 @ pfa and err
724 ldmia sp!, {r0 - r12}
725 ldr lr, [sp, #RF(PSR, 0)]
727 ldr lr, [sp, #RF(SVC_LR, 0)]
729 ldr sp, [sp, #(RF(USR_SP, 0))]
@ Literal: address of the C nested trap handler.
733 3: .word call_nested_trap_handler
@ Kernel debugger entry points (single, sequence, and IPI variants);
@ bodies are largely elided in this sampled listing.
739 .global kern_kdebug_entry
744 .global kern_kdebug_sequence_entry
746 kern_kdebug_sequence_entry:
752 .global kern_kdebug_ipi_entry
754 kern_kdebug_ipi_entry:
@ cp15 ISB via c7,c5,4 — instruction-stream synchronization barrier.
767 mcr p15, 0, lr, c7, c5, 4 @ cp15isb
771 /**********************************************************************
772 * Secure and Nonsecure switching stuff
774 *********************************************************************/
775 .macro SAVE_NONSECURE_STATE off
777 // save exit reason temporarily on stack
780 // switch to secure world
@ c1,c1,0 is the Secure Configuration Register (SCR); writes toggle the
@ NS bit between the secure and non-secure worlds.
782 mcr p15, 0, lr, c1, c1, 0
787 stmia lr!, {r0 - r12}
803 stmia r0!, {r8 - r12, sp, lr}
827 // copy return pc/cpsr from stack
832 // save pending virtual interrupt state
833 mrc p15, 0, r1, c12, c1, 1
836 // switch to non-secure world
838 mcr p15, 0, r1, c1, c1, 0
@ Snapshot all banked/world-specific CP15 state of the non-secure world.
@ (The stores interleaved between these reads are elided in this listing.)
841 mrc p15, 0, r1, c2, c0, 0 @ read CP15_TTB0
844 mrc p15, 0, r1, c2, c0, 1 @ read CP15_TTB1
847 mrc p15, 0, r1, c2, c0, 2 @ read CP15_TTBC
850 mrc p15, 0, r1, c12, c0, 0 @ read CP15_VECTOR_BASE
853 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
856 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
859 mrc p15, 0, r1, c5, c0, 1 @ read CP15_IFSR
862 mrc p15, 0, r1, c6, c0, 2 @ read CP15_IFAR
865 mrc p15, 0, r1, c1, c0, 0 @ read CP15_CONTROL
868 mrc p15, 0, r1, c10, c2, 0 @ read CP15_PRIM_REGION_REMAP
871 mrc p15, 0, r1, c10, c2, 1 @ read CP15_NORM_REGION_REMAP
874 mrc p15, 0, r1, c13, c0, 1 @ read CP15_CID
877 // tls regs are banked
878 mrc p15, 0, r1, c13, c0, 2 @ read CP15_TLS1
881 mrc p15, 0, r1, c13, c0, 3 @ read CP15_TLS2
884 mrc p15, 0, r1, c13, c0, 4 @ read CP15_TLS3
887 mrc p10, 7, r1, cr8, cr0, 0 @ fpexc
890 // switch to secure world
892 mcr p15, 0, r1, c1, c1, 0
895 mrc p15, 0, r1, c5, c0, 0 @ read CP15_DFSR
898 mrc p15, 0, r1, c6, c0, 0 @ read CP15_DFAR
901 // copy the exit reason from stack
@ Inverse of SAVE_NONSECURE_STATE: reload the saved CP15/GP state and
@ switch back to the non-secure world. (Loads feeding these writes are
@ in elided lines.)
906 .macro RESTORE_NONSECURE_STATE off
910 // jump over general purpose register
925 ldmia r0!, {r8 - r12, sp, lr}
949 // copy return pc/cpsr on stack
953 // set pending events
956 mcr p15, 0, r1, c12, c1, 1
959 // switch to non-secure world
961 mcr p15, 0, r1, c1, c1, 0
965 mcr p15, 0, r1, c2, c0, 0 @ write CP15_TTB0
968 mcr p15, 0, r1, c2, c0, 1 @ write CP15_TTB1
971 mcr p15, 0, r1, c2, c0, 2 @ write CP15_TTBC
974 mcr p15, 0, r1, c12, c0, 0 @ write CP15_VECTOR_BASE
977 mcr p15, 0, r1, c5, c0, 0 @ write CP15_DFSR
980 mcr p15, 0, r1, c6, c0, 0 @ write CP15_DFAR
983 mcr p15, 0, r1, c5, c0, 1 @ write CP15_IFSR
986 mcr p15, 0, r1, c6, c0, 2 @ write CP15_IFAR
989 mcr p15, 0, r1, c1, c0, 0 @ write CP15_CONTROL
992 mcr p15, 0, r1, c10, c2, 0 @ write CP15_PRIM_REGION_REMAP
995 mcr p15, 0, r1, c10, c2, 1 @ write CP15_NORM_REGION_REMAP
998 mcr p15, 0, r1, c13, c0, 1 @ write CP15_CID
1000 // tls regs are banked
1002 mcr p15, 0, r1, c13, c0, 2 @ write CP15_TLS1
1005 mcr p15, 0, r1, c13, c0, 3 @ write CP15_TLS2
1008 mcr p15, 0, r1, c13, c0, 4 @ write CP15_TLS3
1011 mcr p10, 7, r1, cr8, cr0, 0 @ fpexc
1013 // switch to secure world
1015 mcr p15, 0, r1, c1, c1, 0
1021 ldmia lr!, {r0 - r12}
1024 /**********************************************************************
1025 * Save secure state on top of the stack.
1027 * We save also the user-level registers here, because we need to
1028 * restore some on FIQ.
1031 .macro SAVE_SECURE_STATE
1033 stmdb sp!, {r3, r4} @ save supervisor return values
@ stmdb without writeback: user sp/lr go below sp, sp itself untouched.
1034 stmdb sp, {sp, lr}^ @ save user-level return values
1038 /**********************************************************************
1039 * Restore secure state when guest returns with monitor call.
1041 * This removes the secure state from the top of the stack.
1043 .macro RESTORE_SECURE_STATE
1045 mov r0, sp @ restore stack pointer from supervisor mode
1049 ldmia sp, {sp, lr}^ @ restore user-level return values
1051 ldmia sp!, {r3, r4} @ restore supervisor return values
1054 /**********************************************************************
1055 * Restore secure state when guest is interrupted by FIQ
1057 * Don't remove secure state from stack as we need it
1058 * when application guest exits.
1059 * Just restore user-level state as this is spilled by the irq handler
1061 .macro RESTORE_SECURE_STATE_FIQ
1063 mov r0, sp @ restore stack pointer from supervisor mode
1067 ldmia sp, {sp, lr}^ @ restore user-level return values
@ World-switch helpers: write the SCR (c1,c1,0) to flip the NS bit.
@ NOTE(review): the lines loading the SCR value into lr are elided.
1070 .macro SWITCH_TO_NONSECURE_MODE
1072 mcr p15, 0, lr, c1, c1, 0
1076 .macro SWITCH_TO_SECURE_MODE
1078 mcr p15, 0, lr, c1, c1, 0
1083 /*****************************************************************************/
1084 /* The monitor entry table stuff */
1085 /*****************************************************************************/
1087 .globl monitor_vector_base
1088 monitor_vector_base:
1090 b mon_undef_entry /* UNDEF */
1091 b mon_swi_entry /* SWI */
1092 b mon_inst_abort_entry /* IABORT */
1093 b mon_data_abort_entry /* DABORT */
1095 b mon_irq_entry /* IRQ */
1096 b mon_fiq_entry /* FIQ */
@ Monitor entries: stash return state (srs), tag the exit reason in lr
@ (1=swi, 2=iabort, 3=dabort, 4=irq/fiq), then fall through to the
@ common world-exit path (elided).
1103 srsdb sp, #0x16 @ save return state temporarily on stack
1104 mov lr, #1 @ set exit reason
1107 mon_inst_abort_entry:
1110 mov lr, #2 @ set exit reason
1113 mon_data_abort_entry:
1116 mov lr, #3 @ set exit reason
1122 mov lr, #4 @ set exit reason
1126 sub lr, lr, #4 @ adjust saved ip
1128 mov lr, #4 @ set exit reason
1131 // cps #0x12 @ switch to irq mode
1132 // adr lr, go_nonsecure_after_fiq + 4 @ set lr_irq
1133 // msr spsr, #0xd3 @ set spsr_irq
1136 /**********************************************************************
1137 * Go to secure world
@ Entering secure world: save the guest (non-secure) state, then restore
@ the secure-world state that was parked on the monitor stack.
1141 SAVE_NONSECURE_STATE 16
1142 RESTORE_SECURE_STATE
1148 /**********************************************************************
1149 * Go to nonsecure world
1151 * When the guest was interrupted by an FIQ, we don't need to save
1152 * secure state again, because it is still on top of the stack.
1155 //go_nonsecure_after_fiq:
1156 // mov r2, sp @ copy sp_svc to sv_mon
1160 // b go_nonsecure_after_fiq_2
1165 RESTORE_NONSECURE_STATE 16
1166 SWITCH_TO_NONSECURE_MODE
1168 // mcr p15, 0, lr, c7, c10, 4 @ drain write buffer
1169 // mcr p15, 0, lr, c8, c7, 0 @ flush TLB entry
@ Load the guest's return PSR/PC into spsr_mon/lr_mon for the final
@ exception return into the non-secure world.
1172 msr spsr, lr @ set spsr_mon with unsecure spsr
1173 ldr lr, [sp, #-8] @ set lr_mon with unsecure ip
1177 /* -------------------------------------- TEXT ---------------------------*/
@ Final return-to-user path: walk a full trap frame at r1/lr, restore
@ SPSR, r0-r12, user sp/lr, then load pc with ^ to also restore CPSR.
1182 add sp, r1, #RF_SIZE
1184 ldr r1, [lr, #RF(PSR, 13*4)] @ Unstack SPSR
1185 msr spsr, r1 @ Load SPSR from kernel_lr
1186 ldmia lr!, {r0 - r12}
1187 ldmia lr, {sp,lr}^ @ restore user sp and lr (now lazy)
1188 #if defined(CONFIG_ARM_V6PLUS)
1189 add lr, lr, #RF_SIZE @ Read return address
1192 add lr, lr, #(RF_SIZE - 4) @ Read return address
1193 ldmdb lr, {pc}^ @ go back to interrupted insn