 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "qemu-barrier.h"
25 int tb_invalidated_flag;
27 //#define CONFIG_DEBUG_EXEC
29 bool qemu_cpu_has_work(CPUState *env)
31 return cpu_has_work(env);
34 void cpu_loop_exit(CPUState *env)
36 env->current_tb = NULL;
37 longjmp(env->jmp_env, 1);
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
43 #if defined(CONFIG_SOFTMMU)
44 void cpu_resume_from_signal(CPUState *env, void *puc)
46 /* XXX: restore cpu registers saved in host registers */
48 env->exception_index = -1;
49 longjmp(env->jmp_env, 1);
53 /* Execute the code without caching the generated code. An interpreter
54 could be used if available. */
55 static void cpu_exec_nocache(CPUState *env, int max_cycles,
56 TranslationBlock *orig_tb)
58 unsigned long next_tb;
61 /* Should never happen.
62 We only end up here when an existing TB is too long. */
63 if (max_cycles > CF_COUNT_MASK)
64 max_cycles = CF_COUNT_MASK;
66 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
69 /* execute the generated code */
70 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
71 env->current_tb = NULL;
73 if ((next_tb & 3) == 2) {
74 /* Restore PC. This may happen if async event occurs before
75 the TB starts executing. */
76 cpu_pc_from_tb(env, tb);
78 tb_phys_invalidate(tb, -1);
82 static TranslationBlock *tb_find_slow(CPUState *env,
87 TranslationBlock *tb, **ptb1;
89 tb_page_addr_t phys_pc, phys_page1;
90 target_ulong virt_page2;
92 tb_invalidated_flag = 0;
94 /* find translated block using physical mappings */
95 phys_pc = get_page_addr_code(env, pc);
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
97 h = tb_phys_hash_func(phys_pc);
98 ptb1 = &tb_phys_hash[h];
104 tb->page_addr[0] == phys_page1 &&
105 tb->cs_base == cs_base &&
106 tb->flags == flags) {
107 /* check next page if needed */
108 if (tb->page_addr[1] != -1) {
109 tb_page_addr_t phys_page2;
111 virt_page2 = (pc & TARGET_PAGE_MASK) +
113 phys_page2 = get_page_addr_code(env, virt_page2);
114 if (tb->page_addr[1] == phys_page2)
120 ptb1 = &tb->phys_hash_next;
123 /* if no translated code available, then translate it now */
124 tb = tb_gen_code(env, pc, cs_base, flags, 0);
127 /* Move the last found TB to the head of the list */
129 *ptb1 = tb->phys_hash_next;
130 tb->phys_hash_next = tb_phys_hash[h];
131 tb_phys_hash[h] = tb;
133 /* we add the TB in the virtual pc hash table */
134 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
138 static inline TranslationBlock *tb_find_fast(CPUState *env)
140 TranslationBlock *tb;
141 target_ulong cs_base, pc;
144 /* we record a subset of the CPU state. It will
145 always be the same before a given translated block
147 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
148 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
149 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
150 tb->flags != flags)) {
151 tb = tb_find_slow(env, pc, cs_base, flags);
156 static CPUDebugExcpHandler *debug_excp_handler;
158 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
160 CPUDebugExcpHandler *old_handler = debug_excp_handler;
162 debug_excp_handler = handler;
166 static void cpu_handle_debug_exception(CPUState *env)
170 if (!env->watchpoint_hit) {
171 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
172 wp->flags &= ~BP_WATCHPOINT_HIT;
175 if (debug_excp_handler) {
176 debug_excp_handler(env);
180 /* main execution loop */
182 volatile sig_atomic_t exit_request;
/* Main CPU execution loop: run translated blocks for @env until an
   exception/interrupt forces an exit; returns the exception index.
   NOTE(review): this listing is elided and garbled — the leading
   integer on each line is a line number fused in from the original
   source, and many lines (braces, #endif's, case labels, declarations)
   are missing.  Comments below annotate only the visible code; the
   function is left byte-identical otherwise. */
184 int cpu_exec(CPUState *env)
186 int ret, interrupt_request;
187 TranslationBlock *tb;
189 unsigned long next_tb;
/* Halted CPU with no pending work returns immediately (elided branch
   presumably returns EXCP_HALTED here — TODO confirm against full source). */
192 if (!cpu_has_work(env)) {
199 cpu_single_env = env;
/* Latch a pending global exit request into this CPU's flag. */
201 if (unlikely(exit_request)) {
202 env->exit_request = 1;
/* Per-target prologue: unpack architectural flags into the temporary
   format the translated code operates on. */
205 #if defined(TARGET_I386)
206 /* put eflags in CPU temporary format */
207 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
208 DF = 1 - (2 * ((env->eflags >> 10) & 1));
209 CC_OP = CC_OP_EFLAGS;
210 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
211 #elif defined(TARGET_SPARC)
212 #elif defined(TARGET_M68K)
213 env->cc_op = CC_OP_FLAGS;
214 env->cc_dest = env->sr & 0xf;
215 env->cc_x = (env->sr >> 4) & 1;
216 #elif defined(TARGET_ALPHA)
217 #elif defined(TARGET_ARM)
218 #elif defined(TARGET_UNICORE32)
219 #elif defined(TARGET_PPC)
220 #elif defined(TARGET_LM32)
221 #elif defined(TARGET_MICROBLAZE)
222 #elif defined(TARGET_MIPS)
223 #elif defined(TARGET_SH4)
224 #elif defined(TARGET_CRIS)
225 #elif defined(TARGET_S390X)
228 #error unsupported target CPU
230 env->exception_index = -1;
232 /* prepare setjmp context for exception handling */
/* cpu_loop_exit() longjmps back here with value 1; raised exceptions
   are then dispatched at the top of the retaken loop. */
234 if (setjmp(env->jmp_env) == 0) {
235 /* if an exception is pending, we execute it here */
236 if (env->exception_index >= 0) {
237 if (env->exception_index >= EXCP_INTERRUPT) {
238 /* exit request from the cpu execution loop */
239 ret = env->exception_index;
240 if (ret == EXCP_DEBUG) {
241 cpu_handle_debug_exception(env);
245 #if defined(CONFIG_USER_ONLY)
246 /* if user mode only, we simulate a fake exception
247 which will be handled outside the cpu execution
249 #if defined(TARGET_I386)
252 ret = env->exception_index;
256 env->exception_index = -1;
/* Inner execution loop: service interrupts, find the next TB, run it. */
261 next_tb = 0; /* force lookup of first TB */
263 interrupt_request = env->interrupt_request;
264 if (unlikely(interrupt_request)) {
265 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
266 /* Mask out external interrupts for this step. */
267 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
269 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
270 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
271 env->exception_index = EXCP_DEBUG;
274 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
275 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
276 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
277 if (interrupt_request & CPU_INTERRUPT_HALT) {
278 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
280 env->exception_index = EXCP_HLT;
/* Target-specific hardware-interrupt dispatch begins here. */
284 #if defined(TARGET_I386)
285 if (interrupt_request & CPU_INTERRUPT_INIT) {
286 svm_check_intercept(env, SVM_EXIT_INIT);
288 env->exception_index = EXCP_HALTED;
290 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
/* GIF (SVM global interrupt flag) gates SMI/NMI/MCE/INTR delivery. */
292 } else if (env->hflags2 & HF2_GIF_MASK) {
293 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
294 !(env->hflags & HF_SMM_MASK)) {
295 svm_check_intercept(env, SVM_EXIT_SMI);
296 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
299 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
300 !(env->hflags2 & HF2_NMI_MASK)) {
301 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
302 env->hflags2 |= HF2_NMI_MASK;
303 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
305 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
306 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
307 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
309 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
310 (((env->hflags2 & HF2_VINTR_MASK) &&
311 (env->hflags2 & HF2_HIF_MASK)) ||
312 (!(env->hflags2 & HF2_VINTR_MASK) &&
313 (env->eflags & IF_MASK &&
314 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
316 svm_check_intercept(env, SVM_EXIT_INTR);
317 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
318 intno = cpu_get_pic_interrupt(env);
319 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
320 do_interrupt_x86_hardirq(env, intno, 1);
321 /* ensure that no TB jump will be modified as
322 the program flow was changed */
324 #if !defined(CONFIG_USER_ONLY)
325 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
326 (env->eflags & IF_MASK) &&
327 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
329 /* FIXME: this should respect TPR */
330 svm_check_intercept(env, SVM_EXIT_VINTR);
331 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
332 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
333 do_interrupt_x86_hardirq(env, intno, 1);
334 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
339 #elif defined(TARGET_PPC)
341 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
345 if (interrupt_request & CPU_INTERRUPT_HARD) {
346 ppc_hw_interrupt(env);
/* Only clear HARD once every pending PPC interrupt was delivered. */
347 if (env->pending_interrupts == 0)
348 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
351 #elif defined(TARGET_LM32)
352 if ((interrupt_request & CPU_INTERRUPT_HARD)
353 && (env->ie & IE_IE)) {
354 env->exception_index = EXCP_IRQ;
358 #elif defined(TARGET_MICROBLAZE)
359 if ((interrupt_request & CPU_INTERRUPT_HARD)
360 && (env->sregs[SR_MSR] & MSR_IE)
361 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
362 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
363 env->exception_index = EXCP_IRQ;
367 #elif defined(TARGET_MIPS)
368 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
369 cpu_mips_hw_interrupts_pending(env)) {
371 env->exception_index = EXCP_EXT_INTERRUPT;
376 #elif defined(TARGET_SPARC)
377 if (interrupt_request & CPU_INTERRUPT_HARD) {
378 if (cpu_interrupts_enabled(env) &&
379 env->interrupt_index > 0) {
380 int pil = env->interrupt_index & 0xf;
381 int type = env->interrupt_index & 0xf0;
383 if (((type == TT_EXTINT) &&
384 cpu_pil_allowed(env, pil)) ||
386 env->exception_index = env->interrupt_index;
392 #elif defined(TARGET_ARM)
393 if (interrupt_request & CPU_INTERRUPT_FIQ
394 && !(env->uncached_cpsr & CPSR_F)) {
395 env->exception_index = EXCP_FIQ;
399 /* ARMv7-M interrupt return works by loading a magic value
400 into the PC. On real hardware the load causes the
401 return to occur. The qemu implementation performs the
402 jump normally, then does the exception return when the
403 CPU tries to execute code at the magic address.
404 This will cause the magic PC value to be pushed to
405 the stack if an interrupt occurred at the wrong time.
406 We avoid this by disabling interrupts when
407 pc contains a magic address. */
408 if (interrupt_request & CPU_INTERRUPT_HARD
409 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
410 || !(env->uncached_cpsr & CPSR_I))) {
411 env->exception_index = EXCP_IRQ;
415 #elif defined(TARGET_UNICORE32)
416 if (interrupt_request & CPU_INTERRUPT_HARD
417 && !(env->uncached_asr & ASR_I)) {
421 #elif defined(TARGET_SH4)
422 if (interrupt_request & CPU_INTERRUPT_HARD) {
426 #elif defined(TARGET_ALPHA)
429 /* ??? This hard-codes the OSF/1 interrupt levels. */
/* PALmode masks everything (level 7); case labels are elided here. */
430 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
432 if (interrupt_request & CPU_INTERRUPT_HARD) {
433 idx = EXCP_DEV_INTERRUPT;
437 if (interrupt_request & CPU_INTERRUPT_TIMER) {
438 idx = EXCP_CLK_INTERRUPT;
442 if (interrupt_request & CPU_INTERRUPT_SMP) {
443 idx = EXCP_SMP_INTERRUPT;
447 if (interrupt_request & CPU_INTERRUPT_MCHK) {
452 env->exception_index = idx;
458 #elif defined(TARGET_CRIS)
459 if (interrupt_request & CPU_INTERRUPT_HARD
460 && (env->pregs[PR_CCS] & I_FLAG)
461 && !env->locked_irq) {
462 env->exception_index = EXCP_IRQ;
466 if (interrupt_request & CPU_INTERRUPT_NMI
467 && (env->pregs[PR_CCS] & M_FLAG)) {
468 env->exception_index = EXCP_NMI;
472 #elif defined(TARGET_M68K)
473 if (interrupt_request & CPU_INTERRUPT_HARD
474 && ((env->sr & SR_I) >> SR_I_SHIFT)
475 < env->pending_level) {
476 /* Real hardware gets the interrupt vector via an
477 IACK cycle at this point. Current emulated
478 hardware doesn't rely on this, so we
479 provide/save the vector when the interrupt is
481 env->exception_index = env->pending_vector;
482 do_interrupt_m68k_hardirq(env);
485 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
486 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
487 (env->psw.mask & PSW_MASK_EXT)) {
492 /* Don't use the cached interrupt_request value,
493 do_interrupt may have updated the EXITTB flag. */
494 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
495 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
496 /* ensure that no TB jump will be modified as
497 the program flow was changed */
501 if (unlikely(env->exit_request)) {
502 env->exit_request = 0;
503 env->exception_index = EXCP_INTERRUPT;
/* Optional CPU-state tracing before each TB (debug builds only). */
506 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
507 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
508 /* restore flags in standard format */
509 #if defined(TARGET_I386)
510 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
512 log_cpu_state(env, X86_DUMP_CCOP);
513 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
514 #elif defined(TARGET_M68K)
515 cpu_m68k_flush_flags(env, env->cc_op);
516 env->cc_op = CC_OP_FLAGS;
517 env->sr = (env->sr & 0xffe0)
518 | env->cc_dest | (env->cc_x << 4);
519 log_cpu_state(env, 0);
521 log_cpu_state(env, 0);
524 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
/* Look up (or translate) the next TB and chain/execute it. */
526 tb = tb_find_fast(env);
527 /* Note: we do it here to avoid a gcc bug on Mac OS X when
528 doing it in tb_find_slow */
529 if (tb_invalidated_flag) {
530 /* as some TB could have been invalidated because
531 of memory exceptions while generating the code, we
532 must recompute the hash index here */
534 tb_invalidated_flag = 0;
536 #ifdef CONFIG_DEBUG_EXEC
537 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
538 (long)tb->tc_ptr, tb->pc,
539 lookup_symbol(tb->pc));
541 /* see if we can patch the calling TB. When the TB
542 spans two pages, we cannot safely do a direct
544 if (next_tb != 0 && tb->page_addr[1] == -1) {
545 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
547 spin_unlock(&tb_lock);
549 /* cpu_interrupt might be called while translating the
550 TB, but before it is linked into a potentially
551 infinite loop and becomes env->current_tb. Avoid
552 starting execution if there is a pending interrupt. */
553 env->current_tb = tb;
555 if (likely(!env->exit_request)) {
557 /* execute the generated code */
558 next_tb = tcg_qemu_tb_exec(env, tc_ptr);
/* (next_tb & 3) == 2 marks an icount expiry mid-TB; low bits otherwise
   encode which jump slot of the returning TB to patch. */
559 if ((next_tb & 3) == 2) {
560 /* Instruction counter expired. */
562 tb = (TranslationBlock *)(long)(next_tb & ~3);
564 cpu_pc_from_tb(env, tb);
565 insns_left = env->icount_decr.u32;
566 if (env->icount_extra && insns_left >= 0) {
567 /* Refill decrementer and continue execution. */
568 env->icount_extra += insns_left;
/* Decrementer is 16-bit; cap the refill at 0xffff (clamp is elided). */
569 if (env->icount_extra > 0xffff) {
572 insns_left = env->icount_extra;
574 env->icount_extra -= insns_left;
575 env->icount_decr.u16.low = insns_left;
577 if (insns_left > 0) {
578 /* Execute remaining instructions. */
579 cpu_exec_nocache(env, insns_left, tb);
581 env->exception_index = EXCP_INTERRUPT;
587 env->current_tb = NULL;
588 /* reset soft MMU for next block (it can currently
589 only be set by a memory fault) */
592 /* Reload env after longjmp - the compiler may have smashed all
593 * local variables as longjmp is marked 'noreturn'. */
594 env = cpu_single_env;
/* Per-target epilogue: repack temporary flag state into the
   architectural registers before returning to the caller. */
599 #if defined(TARGET_I386)
600 /* restore flags in standard format */
601 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
603 #elif defined(TARGET_ARM)
604 /* XXX: Save/restore host fpu exception state?. */
605 #elif defined(TARGET_UNICORE32)
606 #elif defined(TARGET_SPARC)
607 #elif defined(TARGET_PPC)
608 #elif defined(TARGET_LM32)
609 #elif defined(TARGET_M68K)
610 cpu_m68k_flush_flags(env, env->cc_op);
611 env->cc_op = CC_OP_FLAGS;
612 env->sr = (env->sr & 0xffe0)
613 | env->cc_dest | (env->cc_x << 4);
614 #elif defined(TARGET_MICROBLAZE)
615 #elif defined(TARGET_MIPS)
616 #elif defined(TARGET_SH4)
617 #elif defined(TARGET_ALPHA)
618 #elif defined(TARGET_CRIS)
619 #elif defined(TARGET_S390X)
622 #error unsupported target CPU
625 /* fail safe : never use cpu_single_env outside cpu_exec() */
626 cpu_single_env = NULL;