/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurred
           before the TB started executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

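/* tb_find_slow() is the second level of the TB lookup: it walks the
   chained bucket of the physical TB hash table, matching on
   (pc, physical page, cs_base, flags) and, for a TB spanning two
   pages, on the physical address of the second page as well.  On a
   miss, the block is translated on the spot with tb_gen_code().  */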
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* add the TB to the virtual PC hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

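/* tb_find_fast() is the first-level lookup: tb_jmp_cache is a
   direct-mapped cache indexed by a hash of the virtual PC.  The
   additional cs_base/flags comparison guards against stale entries
   left behind after mode or segment changes; only on a mismatch do we
   fall back to tb_find_slow().  */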
static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

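    /* If the debug exception was not raised by a watchpoint
       (watchpoint_hit is unset), clear any stale per-watchpoint hit
       flags before dispatching to the handler. */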
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
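    /* Note: on x86 (and m68k) condition codes are kept in a lazy
       format (CC_OP plus source operands) while translated code runs,
       so eflags is split apart here and recomposed on exit; the
       matching restore code is at the bottom of cpu_exec(). */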
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
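            /* next_tb holds the address of the TB that just ran, with
               its low two bits used as a tag: an exit through a
               goto_tb slot leaves the slot index (0 or 1) there, which
               tb_add_jump() uses below to patch direct block chaining,
               while the value 2 marks an icount-expiry exit. */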
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
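                    /* icount: translated code decrements the 16-bit
                       counter in icount_decr.u16.low and exits with
                       tag 2 when it reaches zero; any budget beyond
                       0xffff is parked in icount_extra and used to
                       refill the decrementer below. */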
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}