/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

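/* Set outside this file when previously generated TBs may have been
   invalidated (e.g. by a TB flush); checked in the main loop below so that
   no direct jump is chained to a possibly stale block.  */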
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

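/* Abandon the block currently being executed and return to the setjmp()
   point in cpu_exec().  */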
void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

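/* Look the block up in the physical-address hash table, generating a fresh
   translation if none is found; the result is also entered into the
   virtual-PC jump cache.  */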
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

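/* Fast path: look the next TB up in the per-CPU virtual-PC jump cache,
   falling back to tb_find_slow() on a miss or a CPU-state mismatch.  */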
static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

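/* Called when the loop exits with EXCP_DEBUG: if the exception was not
   raised by a watchpoint, clear any left-over watchpoint-hit flags, then
   hand the exception to the registered handler, if any.  */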
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

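/* Global request to leave the execution loop; declared volatile
   sig_atomic_t so it can safely be set asynchronously (e.g. from a signal
   handler).  cpu_exec() folds it into env->exit_request on entry.  */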
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

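            /* next_tb carries the address of the previously executed TB
               with the index of the jump slot it left through encoded in
               its low two bits (the value 2 there marks an exit forced by
               the instruction counter); zero means there is no previous
               block to chain from.  */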
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
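                /* Service pending interrupts and exit requests before
                   chaining to the next TB; the dispatch below is
                   target-specific.  */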
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(env, SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
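                        /* The 16-bit field icount_decr.u16.low holds at
                           most 0xffff instructions of the budget; any
                           excess is kept in icount_extra and refilled from
                           there in chunks below.  */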
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}