/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

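/* Unwind the current TB and return control to the sigsetjmp() point in
   cpu_exec(); callers set cpu->exception_index first to tell the outer
   loop why execution stopped. */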
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

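    /* The return value encodes both the last TB executed and why we
       stopped: the TB pointer lives in the high bits, the TB_EXIT_*
       reason in the low TB_EXIT_MASK bits. */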
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

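/* Slow-path TB lookup: search the global physical-PC hash table and
   translate a fresh TB on a miss.  tb_find_fast() below consults the
   per-CPU virtual-PC jump cache before falling back to this. */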
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

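/* Global exit flag: set by other threads to request that cpu_exec()
   stop; cpu_exec() folds it into cpu->exit_request on entry. */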
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

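    /* Per-target entry fixups: bring lazily evaluated state (e.g. the
       x86 condition codes) into the form the translated code expects. */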
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
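            /* next_tb remembers the TB we just left, with the index of
               the jump slot we exited through in its low bits, so the
               next TB found can be patched straight into it; zero
               disables that chaining. */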
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value:
                       do_interrupt() may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
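                /* tb_lock protects the TB lookup (which may translate
                   new code) and the direct-jump patching below. */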
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}