cpu-exec.c (lisovros/qemu_apohw.git, blob at commit "cpu-exec: Change cpu_resume_from_signal() argument to CPUState")
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

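/* Abort execution of the current TB and unwind straight back to the
 * sigsetjmp() point in cpu_exec(). */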
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
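/* The return value is the pointer of the last TB that was executed, with
 * its exit reason encoded in the low bits (the TB_EXIT_* codes, selected
 * by TB_EXIT_MASK); both parts are decoded below. */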
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
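    /* The TB was generated for a single use: unlink it from the physical
     * hash and page lists before freeing its storage. */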
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

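    /* TBs are hashed on the physical address of their first instruction,
     * so different virtual mappings of the same code can share one
     * translation. */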
    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the TB we just found to the head of its hash chain so that
       frequently used TBs are found quickly on later lookups */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
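    /* First probe the direct-mapped per-CPU cache indexed by the virtual
     * PC; on a miss, fall back to the physical hash in tb_find_slow(). */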
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

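    /* A debug exception that was not raised by a watchpoint (e.g. a
     * breakpoint or single-step) must not leave stale hit flags behind. */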
    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
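/* exit_request is set by other threads (or signal handlers) to ask the
 * running CPU to leave its execution loop; it is sampled once on entry
 * to cpu_exec(), after the smp_mb() below. */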

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
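        /* sigsetjmp() returns 0 when entered directly; a siglongjmp()
         * from cpu_loop_exit() lands here again with a non-zero value
         * and is handled by the else branch at the bottom of the loop. */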
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

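            /* next_tb carries the previously executed TB (with its exit
             * reason in the low bits) so that it can be chained directly
             * to the next TB; zero means "do not patch any jump". */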
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_cpu);
                            cpu->exception_index = EXCP_HALTED;
                            cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
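                /* tb_lock guards the shared translation structures (the
                 * physical hash table and TB jump chaining) while we look
                 * up, and possibly generate and patch, TBs. */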
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
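                /* barrier() is a compiler barrier: the store to
                 * current_tb above must not be reordered past the
                 * exit_request test below. */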
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
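                        /* icount counts down in the 16-bit low half of
                         * icount_decr; icount_extra banks whatever did
                         * not fit into that 16-bit budget. */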
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}