2 * This file contains a 'gate_init' initialization table
3 * to initialize the x86 processor trap vectors to default entrypoints.
4 * These entrypoints simply push a standard trap_state frame
5 * and jump to the 'trap_handler' routine.
8 #include "config_tcbsize.h"
9 #include "config_gdt.h"
10 #include "globalconfig.h"
12 #include <low_level.h>
14 #include "tcboffset.h"
19 /* We make the trap handler an interrupt gate, because for debugging
20 purposes, we don't want any interrupts to occur until they're
21 explicitly enabled in the base_trap_handler (usually
22 Thread::handle_slow_trap). */
25 * No error code. Clear error code and push trap number.
27 #define EXCEPTION(n,name) \
28 GATE_ENTRY(n,entry_##name,ACC_PL_K | ACC_INTR_GATE) ;\
37 * User-accessible exception. Otherwise, same as above.
39 #define EXCEP_USR(n,name) \
40 GATE_ENTRY(n,entry_##name,ACC_PL_U | ACC_INTR_GATE) ;\
49 * Error code has been pushed. Just push trap number.
51 #define EXCEP_ERR(n,name) \
52 GATE_ENTRY(n,entry_##name,ACC_PL_K | ACC_INTR_GATE) ;\
59 GATE_INITTAB_BEGIN(idt_init_table)
/* Gate-init table: one entry per CPU exception vector (0x00-0x13).
   All entries are interrupt gates, so further interrupts stay
   disabled until the handler enables them explicitly (see the
   comment at the top of this file). */
61 EXCEPTION(0x00,vec00_zero_div)
62 /* IA32 has to handle breakpoint exceptions if they occurred exactly at
63 entry_sys_fast_ipc -- see ia32/entry-ia32.S */
64 GATE_ENTRY(0x01,entry_vec01_debug,ACC_PL_K | ACC_INTR_GATE)
65 /* XXX IA32 has to handle an NMI that occurred exactly at entry_sys_fast_ipc */
66 EXCEP_USR(0x02,vec02_nmi)
67 EXCEP_USR(0x03,vec03_breakpoint)
68 EXCEP_USR(0x04,vec04_into)
69 EXCEP_USR(0x05,vec05_bounds)
70 EXCEPTION(0x06,vec06_invop)
71 /* EXCEPTION(0x07,nofpu) */
72 // XXX we can't use a task gate, instead we must use IST
73 GATE_ENTRY(0x08,entry_vec08_dbf, ACC_PL_K | ACC_INTR_GATE)
74 EXCEPTION(0x09,vec09_fpu_ovfl)
75 /* EXCEP_ERR(0x0a,vec0a_inv_tss) */
76 EXCEP_ERR(0x0b,vec0b_segnp)
77 EXCEP_ERR(0x0c,vec0c_stack_fault)
78 EXCEP_ERR(0x0d,vec0d_gen_prot)
79 /* EXCEP_ERR(0x0e,vec0e_page_fault) */
80 /* EXCEPTION(0x0f,vec0f_trap_0f) */
81 EXCEPTION(0x10,vec10_fpu_err)
82 EXCEP_ERR(0x11,vec11_align)
83 EXCEPTION(0x12,vec12_mcheck)
84 EXCEPTION(0x13,vec13_simd_err)
/*
 * slowtraps: common continuation for the exception entry stubs above.
 * On entry the stack holds a trap_state frame (saved registers, trap
 * number, error code); it is handed to the installed C++ handler
 * (BASE_TRAP_HANDLER, usually Thread::handle_slow_trap), which decides
 * whether execution may resume.
 */
88 .type slowtraps,@function
90 /* We have to introduce the label _slowtraps besides the label
91 slowtraps to achieve that jmps from exception entry points
92 are optimized to two-byte jmps. The label slowtraps is visible
96 mov %rsp,%rdi /* ARG1: address of trap_state */
97 mov $0, %esi /* ARG2: default CPU = 0 */
99 #ifndef CONFIG_NO_FRAME_PTR
100 mov 0x90(%rsp),%rax /* create artificial stack frame */
101 push %rax /* push rip */
103 # ifndef CONFIG_PROFILE
108 /* Call the C handler function if one has been installed. */
109 mov BASE_TRAP_HANDLER, %rax
111 jz unexpected_trap_pop /* handler == 0 -> trap is unexpected */
115 #ifndef CONFIG_NO_FRAME_PTR
116 lea 0x10(%rsp),%rsp /* remove frame pointer */
119 /* If the handler function returned zero (success),
120 then resume execution as if the trap never happened.
121 Otherwise, just panic. */
126 add $0x10,%rsp /* pop trap number and error code */
130 #ifndef CONFIG_NO_FRAME_PTR
131 lea 0x10(%rsp), %rsp /* remove stack frame */
135 mov %rsp,%rdi /* 1st arg: trap state */
138 GATE_ENTRY(0x0e,entry_vec0e_page_fault,ACC_PL_K | ACC_INTR_GATE)
140 /* we must save %cr2 before we can be preempted -- therefore we're an
141 interrupt gate (invoked with interrupts turned off). Also, we
142 don't turn them on again here, but only after checking for
143 page-ins from the global page directory in thread_page_fault().
144 XXX: If you make changes to stack layout here, fix thread_page_fault */
146 /* XXX slow version - sets up nice stack frame for debugger */
149 .type entry_vec0e_page_fault,@function
150 .globl entry_vec0e_page_fault
151 entry_vec0e_page_fault:
155 /* We must reset the cancel flag here atomically
156 if we are entering fresh from user mode and an IPC might occur.
157 NOTE: We cannot test the user-mode bit in the error code because
158 it will flag "kernel" in case an I/O-bitmap page is not mapped
159 during an I/O access. */
161 mov 0x58(%rsp),%rcx /* get CS from stack */
162 andb $3,%cl /* retrieve current privilege level (CPL) */
163 jz 1f /* CPL == 0 -> kernel, skip resetting state */
165 RESET_THREAD_CANCEL_AT %rcx
/* Marshal the fault information into the SysV AMD64 argument
   registers (rdi, rsi, rdx, rcx, r8) for thread_page_fault(). */
167 leaq 0x50(%rsp),%r8 /* arg5: ptr to return frame */
168 mov PAGE_FAULT_ADDR,%rdi /* arg1: page fault address */
169 mov 0x48(%rsp),%rsi /* arg2: error code */
170 mov 0x50(%rsp),%rdx /* arg3: rip */
171 mov 0x60(%rsp),%rcx /* arg4: rflags */
172 call thread_page_fault
178 add $8,%rsp /* remove error code */
181 /* If code or stack from a small address space are not yet mapped in the
182 current page directory we might get a page fault on return from the
183 trampoline page. In this case we cannot return to the trampoline page
184 after handling the fault because we are already in user mode (with
185 segment limits below kernel space) while the trampoline code is located
186 in kernel data space. So instead we change ESP and EIP to point to the
187 address the trampoline wanted to return to and do the normal IRET. */
189 /* recover from a bad page fault by invoking the slow_trap handler */
193 /* we have on stack: r8, rdi, rsi, rdx, rcx, rax, error code
194 move registers down to make room for trap number
195 and build complete trap state before jumping to trap handler */
202 /* FPU not available in this context. */
203 GATE_ENTRY(0x07,entry_vec07_fpu_unavail, ACC_PL_K | ACC_INTR_GATE)
205 /* do all of this with disabled interrupts */
207 .type entry_vec07_fpu_unavail,@function
208 entry_vec07_fpu_unavail:
211 mov SCRATCH_REGISTER_SIZE(%rsp), %rdi /* arg1: slot just above the saved scratch regs (the saved RIP -- same slot the timer entry passes as rip) */
212 call thread_handle_fputrap
215 jz real_fpu_exception /* taken when the handler signals a genuine FPU exception */
226 /* timer interrupt */
/* The timer gate is wired to a different vector depending on the
   configured timer source: PIT -> 0x20, RTC -> 0x28, APIC ->
   APIC_IRQ_BASE; with HPET the gate is installed at startup. */
227 #ifdef CONFIG_SCHED_PIT
228 GATE_ENTRY(0x20,entry_int_timer,ACC_PL_K | ACC_INTR_GATE)
230 #ifdef CONFIG_SCHED_RTC
231 GATE_ENTRY(0x28,entry_int_timer,ACC_PL_K | ACC_INTR_GATE)
233 #ifdef CONFIG_SCHED_APIC
234 GATE_ENTRY(APIC_IRQ_BASE, entry_int_timer,ACC_PL_K | ACC_INTR_GATE)
236 #ifdef CONFIG_SCHED_HPET
237 /* HPET is set at startup */
241 .globl entry_int_timer
246 mov SCRATCH_REGISTER_SIZE(%rsp),%rdi /* pass rip for logging */
247 call thread_timer_interrupt /* enter with disabled irqs */
253 .globl entry_int_timer_slow
254 entry_int_timer_slow:
257 call thread_timer_interrupt_slow /* enter with disabled irqs */
258 in_timer_interrupt_slow:
259 jmp do_timer_interrupt
263 .globl entry_int_timer_stop
264 entry_int_timer_stop:
267 call thread_timer_interrupt_stop
271 /* other interrupts */
/* INTERRUPT(int, name): install a kernel interrupt gate for vector
   'int' and emit a stub that loads the hardware IRQ number
   (int - 0x20) into %rdi before entering the generic IRQ path. */
273 #define INTERRUPT(int,name) \
274 GATE_ENTRY(int,entry_##name,ACC_PL_K | ACC_INTR_GATE) ;\
278 mov 0x28(%rsp),%rsi ;\
279 mov $ (int - 0x20),%rdi ;\
282 .type __generic_irq_entry,@function
283 .global __generic_irq_entry
296 .type all_irqs,@function
299 call irq_interrupt /* enter with disabled irqs */
303 entry_int_pic_ignore:
306 .global entry_int_pic_ignore
310 #ifndef CONFIG_SCHED_PIT
315 #ifndef CONFIG_SCHED_RTC
/* SYSTEM_CALL(int, name): user-accessible (ACC_PL_U) interrupt gate;
   the stub loads %rax with the syscall_table slot for this vector
   (8 bytes per entry, table starts at vector 0x30). */
320 #define SYSTEM_CALL(int,name) \
321 GATE_ENTRY(int,entry_##name,ACC_PL_U | ACC_INTR_GATE) ;\
325 mov $(syscall_table+8*(int-0x30)), %rax ;\
329 .type all_syscalls,@function
334 RESET_THREAD_CANCEL_AT %rbx
343 #ifdef CONFIG_ASSEMBLER_IPC_SHORTCUT
344 GATE_ENTRY(0x30,entry_sys_ipc,ACC_PL_U | ACC_INTR_GATE);
346 GATE_ENTRY(0x30,entry_sys_ipc_c,ACC_PL_U | ACC_INTR_GATE);
349 #if defined (CONFIG_JDB_LOGGING) || !defined(CONFIG_ASSEMBLER_IPC_SHORTCUT)
351 .globl entry_sys_ipc_c
357 RESET_THREAD_CANCEL_AT %rbx
370 /* The slow variant of sys_ipc_entry is used when logging IPC */
372 .globl entry_sys_ipc_log
379 RESET_THREAD_CANCEL_AT %rbx
390 // these labels help show_tcb to guess the thread state
394 .globl in_handle_fputrap
396 .globl in_timer_interrupt
397 .globl in_timer_interrupt_slow
400 SYSTEM_CALL(0x32,sys_invoke_debug)
402 /* these functions are implemented in entry-native.S */
403 GATE_ENTRY(0x0a,entry_vec0a_invalid_tss,ACC_PL_K | ACC_INTR_GATE)
404 GATE_ENTRY(0x0f,entry_vec0f_apic_spurious_interrupt_bug,ACC_PL_K | ACC_INTR_GATE)
405 GATE_ENTRY(APIC_IRQ_BASE + 3,entry_apic_error_interrupt,ACC_PL_K | ACC_INTR_GATE)
406 GATE_ENTRY(APIC_IRQ_BASE + 0xf,entry_apic_spurious_interrupt,ACC_PL_K | ACC_INTR_GATE)
/* inter-processor-interrupt (IPI) vectors, placed relative to APIC_IRQ_BASE */
409 GATE_ENTRY(APIC_IRQ_BASE + 2, entry_ipi, ACC_PL_K | ACC_INTR_GATE)
410 GATE_ENTRY(APIC_IRQ_BASE - 2, entry_debug_ipi, ACC_PL_K | ACC_INTR_GATE)
411 GATE_ENTRY(APIC_IRQ_BASE - 1, entry_ipi_remote_request, ACC_PL_K | ACC_INTR_GATE)
417 .globl leave_by_trigger_exception
418 leave_by_trigger_exception:
424 jmp thread_restore_exc_state
/*
 * leave_by_vcpu_upcall: divert the kernel-exit path into a vCPU
 * upcall.  Copies the interrupted state (iret frame plus the saved
 * scratch registers) into the trap_state area inside the thread's
 * vCPU state, then switches %rsp to that area.
 */
428 .globl leave_by_vcpu_upcall
429 leave_by_vcpu_upcall:
431 sub $40,%rsp /* clean up stack from previous
434 call thread_restore_exc_state
437 mov OFS__THREAD__USER_VCPU(%rcx), %rdi
438 mov OFS__THREAD__VCPU_STATE(%rcx), %rcx
439 add $(VAL__SIZEOF_TRAP_STATE - 40), %rcx /* %rcx -> iret-frame part of the vcpu trap_state */
440 mov SCRATCH_REGISTER_SIZE(%rsp), %rdx
441 mov %rdx, (%rcx) /* RIP */
442 mov 16 + SCRATCH_REGISTER_SIZE(%rsp), %rdx
443 mov %rdx, 16(%rcx) /* RFLAGS */
444 mov 24 + SCRATCH_REGISTER_SIZE(%rsp), %rdx
445 mov %rdx, 24(%rcx) /* RSP */
446 mov 32 + SCRATCH_REGISTER_SIZE(%rsp), %rdx
447 mov %rdx, 32(%rcx) /* SS */
/* Copy the saved scratch registers into their trap_state slots
   (negative offsets from the iret frame).  NOTE(review): the slot
   mapping must match the Trap_state layout -- verify against
   tcboffset.h when changing anything here. */
449 mov 0(%rsp), %rdx /* R11 */
450 mov %rdx, -(14*8)(%rcx)
451 mov 8(%rsp), %rdx /* R10 */
452 mov %rdx, -(13*8)(%rcx)
453 mov (2*8)(%rsp), %rdx
454 mov %rdx, -(12*8)(%rcx)
455 mov (3*8)(%rsp), %rdx
456 mov %rdx, -(11*8)(%rcx)
457 mov (4*8)(%rsp), %rdx
458 mov %rdx, -(5*8)(%rcx)
459 mov (5*8)(%rsp), %rdx
460 mov %rdx, -(4*8)(%rcx)
461 mov (6*8)(%rsp), %rdx
462 mov %rdx, -(3*8)(%rcx)
463 mov (7*8)(%rsp), %rdx
464 mov %rdx, -(9*8)(%rcx)
465 mov (8*8)(%rsp), %rdx
466 mov %rdx, -(10*8)(%rcx)
468 lea SCRATCH_REGISTER_SIZE(%rsp), %rdx
469 lea -(5*8)(%rcx), %rsp
475 /*add SCRATCH_REGISTER_SIZE, %esp*/
/* REG_GS: configuration-dependent spelling of the gs push below
   (CPU_GS in this configuration -- presumably guarded by an #ifdef;
   verify against the full file). */
478 # define REG_GS CPU_GS
483 pushq %fs /* we save the segment regs in the trap */
484 pushq REG_GS /* state, but we do not restore them. We */
485 pushq %ds /* rather reload them using */
486 pushq %es /* RESET_{KERNEL,USER}_SEGMENTS */
489 mov -VAL__SIZEOF_TRAP_STATE + 40 + OFS__VCPU_STATE__ENTRY_SP(%rcx), %rax /* vCPU entry SP from the vcpu state */
491 mov -VAL__SIZEOF_TRAP_STATE + 40 + OFS__VCPU_STATE__ENTRY_IP(%rcx), %rax /* vCPU entry IP from the vcpu state */