/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

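/* A note on tcg_qemu_tb_exec()'s return value, as used below: the
 * pointer bits identify the TranslationBlock we stopped in, and the
 * low bits (TB_EXIT_MASK) say why.  Exit codes up to TB_EXIT_IDX1
 * mean we left through one of the TB's two direct-jump slots, so the
 * guest state is already consistent; anything above that (icount
 * expiry, an exit request) means the TB never started executing and
 * the guest PC must be rewound to the TB's start.
 */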
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the
         * instruction counter hit zero); we must restore the guest PC
         * to the address of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
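/* (In this loop it is only reached from the TB_EXIT_ICOUNT_EXPIRED
   path below, with max_cycles set to the instructions left on the
   icount decrementer, so exactly that many guest instructions run
   before we stop.) */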
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

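/* TB lookup is two-level: tb_find_fast() probes the per-environment
   virtual-PC jump cache (tb_jmp_cache) and, on a miss, falls back to
   tb_find_slow() below, which walks the physical-PC hash chain,
   generating code on demand when no TB exists yet, and then refills
   the jump cache. */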
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

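/* A TB is keyed by the (pc, cs_base, flags) triple: flags capture the
   CPU state the translation depended on (privilege level, code size,
   etc. depending on the target), so a cached TB may only be reused
   when the whole triple matches. */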
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

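/* If the pending debug exception was not raised by a watchpoint,
   clear any stale BP_WATCHPOINT_HIT flags before dispatching to the
   registered handler. */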
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

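/* Set from other threads (and signal handlers) to ask the currently
   executing CPU to leave this loop; hence the volatile sig_atomic_t.
   cpu_exec() samples it after publishing itself via current_cpu, see
   the smp_mb() below. */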
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
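    /* The i386 target keeps condition codes lazily: CC_SRC and CC_OP
     * record the last flag-setting operation, and the architectural
     * eflags bits are only rebuilt on demand (see cpu_cc_compute_all()
     * at the bottom of this function). */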
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
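    /* Everything below that calls cpu_loop_exit() - raised exceptions,
     * cpu->exit_request, debug events - siglongjmp()s back to the
     * sigsetjmp() at the top of this loop, where the recorded
     * exception_index is dispatched before execution resumes. */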
    for (;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
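                    /* Each target tests its own conditions for taking an
                       interrupt here; whenever one is delivered, next_tb
                       is cleared so the interrupted TB does not get
                       chained to whatever executes next. */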
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_env_get_cpu(env));
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(cpu, 0);
#else
                    log_cpu_state(cpu, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
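                /* The low bits of next_tb select which of the previous
                   TB's two direct-jump slots tb_add_jump() patches; once
                   patched, the two TBs run back to back without returning
                   to this loop. */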
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
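                        /* The icount decrementer is split in two: the
                           16-bit u16.low field is what TBs decrement,
                           while icount_extra banks the remainder that
                           does not fit in 16 bits.  Either refill
                           u16.low from icount_extra and keep going, or
                           run the last few instructions uncached and
                           leave with EXCP_INTERRUPT. */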
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}