/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

26
/* Public wrapper that reports whether @cpu has pending work; simply
 * forwards to the cpu_has_work() helper. */
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

31
/* Abandon execution of the current TB and jump back to the sigsetjmp()
 * point established in cpu_exec().  Never returns to the caller. */
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* We are leaving TB execution, so no TB is current any more; clear
     * this before the longjmp so cpu_exec() sees consistent state. */
    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
38

39 40 41
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
42
#if defined(CONFIG_SOFTMMU)
43
void cpu_resume_from_signal(CPUArchState *env, void *puc)
44 45 46 47
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
48
    siglongjmp(env->jmp_env, 1);
49 50
}
#endif
51

52 53 54 55
/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * Returns the raw value produced by tcg_qemu_tb_exec(): a pointer to
 * the last executed TB with the low TB_EXIT_MASK bits encoding why
 * execution stopped (TB_EXIT_REQUESTED, TB_EXIT_ICOUNT_EXPIRED, ...).
 * Callers decode it with the TB_EXIT_* constants. */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    /* Optionally dump CPU state before entering the TB. */
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    /* Run the generated host code for this TB. */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

pbrook's avatar
pbrook committed
98 99
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
100
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
Blue Swirl's avatar
Blue Swirl committed
101
                             TranslationBlock *orig_tb)
pbrook's avatar
pbrook committed
102
{
103
    CPUState *cpu = ENV_GET_CPU(env);
pbrook's avatar
pbrook committed
104 105 106 107 108 109 110 111 112
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
113
    cpu->current_tb = tb;
pbrook's avatar
pbrook committed
114
    /* execute the generated code */
115
    cpu_tb_exec(cpu, tb->tc_ptr);
116
    cpu->current_tb = NULL;
pbrook's avatar
pbrook committed
117 118 119 120
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

121
/* Slow-path TB lookup: search the physical-address hash table for a TB
 * matching (pc, cs_base, flags), translating a new one if none exists.
 * On success the TB is moved to the head of its hash chain and recorded
 * in the virtual-PC jump cache.  Clears tb_invalidated_flag first so the
 * caller can detect whether tb_gen_code() invalidated existing TBs. */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    /* ptb1 tracks the link that points at the current candidate; the
     * move-to-front step below relies on it still aliasing that link. */
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        /* Unlink via the tracked link, then push onto the chain head.
         * (*ptb1 is NULL when we fell through from not_found.) */
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

177
/* Fast-path TB lookup: consult the per-CPU virtual-PC jump cache and
 * fall back to tb_find_slow() on a miss or stale entry.  The (pc,
 * cs_base, flags) triple is the subset of CPU state that is guaranteed
 * to be identical every time a given translated block runs. */
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    target_ulong cs_base, pc;
    int flags;
    TranslationBlock *cached;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    cached = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (likely(cached && cached->pc == pc && cached->cs_base == cs_base &&
               cached->flags == flags)) {
        /* Cache hit: the stored TB still matches the current state. */
        return cached;
    }
    /* Miss or mismatch: do the full physical-hash lookup/translate. */
    return tb_find_slow(env, pc, cs_base, flags);
}

195 196
/* Optional hook invoked by cpu_handle_debug_exception(); NULL when no
 * handler has been registered. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Register @handler as the global debug-exception hook, replacing any
 * previously registered handler. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

202
/* Post-process an EXCP_DEBUG exit: if no specific watchpoint fired,
 * clear the hit flag on every watchpoint, then invoke the registered
 * debug-exception hook (if any). */
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

/* Set by other threads to request that the executing CPU leave the
 * loop; sampled into cpu->exit_request at cpu_exec() entry. */
volatile sig_atomic_t exit_request;

/* Main CPU execution loop.
 *
 * Repeatedly finds (or translates) and executes translation blocks for
 * @env, dispatching pending interrupts between blocks, until an
 * exception or exit request terminates the loop.  Returns the
 * exception index that ended execution (e.g. EXCP_INTERRUPT, EXCP_HLT,
 * EXCP_DEBUG), or EXCP_HALTED immediately if the CPU is halted with no
 * pending work.
 *
 * Structure: an outer for(;;) establishes a sigsetjmp() context;
 * cpu_loop_exit() longjmps back here.  The inner for(;;) is the
 * TB-execution loop proper.  Large #ifdef sections handle per-target
 * interrupt delivery and condition-flag save/restore. */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    /* A halted CPU only resumes when it has pending work. */
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    /* Per-target setup: move guest condition flags into the internal
     * ("CPU temporary") representation used while translated code runs. */
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                /* Sample interrupt_request once; re-read from the CPU
                 * only where noted (EXITTB check below). */
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
                    /* Per-target hardware interrupt delivery.  Each
                     * branch that changes control flow zeroes next_tb so
                     * no stale TB chaining survives the redirect. */
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_cpu);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

    /* Per-target teardown: fold internal condition-flag state back into
     * the guest's architectural format before returning. */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}