/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

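    /* tcg_qemu_tb_exec() returns the address of the last TB that ran,
     * with a TB_EXIT_* reason code folded into its low TB_EXIT_MASK
     * bits; the checks below decode that pair.
     */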
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

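    /* max_cycles travels in the cflags argument of tb_gen_code(); its
     * CF_COUNT_MASK bits bound how many guest instructions the one-shot
     * TB may execute before handing control back.
     */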
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

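    /* TBs are keyed by the guest *physical* address of their first
     * instruction, so a block stays valid across changes in the virtual
     * mapping; the virtual-PC tb_jmp_cache filled at the end of this
     * function is only a fast-path cache in front of this table.
     */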
    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* We record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
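    /* tb_jmp_cache is a small direct-mapped cache indexed by a hash of
     * the current virtual PC; a miss or a stale entry falls back to the
     * physical-hash lookup in tb_find_slow().
     */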
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
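    /* env->df becomes +1 or -1: bit 10 of eflags is DF, and the value is
     * used directly as the per-element stride of string instructions.
     */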
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
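            /* sigsetjmp() returns 0 on the direct call; when an exception
             * path calls cpu_loop_exit(), the siglongjmp() lands back here
             * with a nonzero value and we take the else branch below to
             * reload the potentially clobbered locals.
             */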
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
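                /* Direct block chaining: the low TB_EXIT_MASK bits of
                 * next_tb name which goto_tb slot of the previous TB we
                 * left through; tb_add_jump() patches that slot to jump
                 * straight to the new TB on future executions.
                 */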
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
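                        /* The low 16 bits of the decrementer count down in
                         * generated code and go negative once the budget is
                         * spent; icount_extra holds whatever part of the
                         * budget did not fit into those 16 bits.
                         */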
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* Fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}