@@ -132,10 +132,14 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
#endif /* CONFIG USER ONLY */
/* Execute a TB, and fix up the CPU state afterwards if necessary */
-static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
+static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
CPUArchState *env = cpu->env_ptr;
uintptr_t next_tb;
+ uint8_t *tb_ptr = itb->tc_ptr;
+
+ qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
+ itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
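The qemu_log_mask() call added above is the one-line form of the check-then-log pattern that a later hunk removes from cpu_exec(). As a hedged reference (paraphrased from include/qemu/log.h of this era, not part of the patch), the macro expands to roughly:

    #define qemu_log_mask(MASK, FMT, ...)                \
        do {                                             \
            if (unlikely(qemu_loglevel_mask(MASK))) {    \
                qemu_log(FMT, ## __VA_ARGS__);           \
            }                                            \
        } while (0)

so the trace costs only a flag test whenever CPU_LOG_EXEC is not enabled.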
@@ -166,6 +170,10 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
*/
CPUClass *cc = CPU_GET_CLASS(cpu);
TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
+ qemu_log_mask(CPU_LOG_EXEC,
+ "Abandoned execution of TB chain before %p ["
+ TARGET_FMT_lx "] %s\n",
+ itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
if (cc->synchronize_from_tb) {
cc->synchronize_from_tb(cpu, tb);
} else {
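For readers following the pointer arithmetic in this hunk: the value returned by the generated code is the address of a TranslationBlock with a two-bit exit reason packed into its low bits (TB pointers are sufficiently aligned for that), which is why the pointer is recovered by masking off TB_EXIT_MASK. A sketch of the decoding, with illustrative local names and the constant names from tcg/tcg.h as of this series:

    uintptr_t ret = cpu_tb_exec(cpu, tb);
    TranslationBlock *ret_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    int exit_code = ret & TB_EXIT_MASK;
    /* TB_EXIT_IDX0 / TB_EXIT_IDX1: the chain was left through one of the two
     * goto_tb slots and ret_tb ran to completion.  Anything greater
     * (TB_EXIT_ICOUNT_EXPIRED, TB_EXIT_REQUESTED) means ret_tb was never
     * started, which is the case the hunk above logs before resynchronizing
     * the guest PC. */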
@@ -201,7 +209,7 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
cpu->current_tb = tb;
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
- cpu_tb_exec(cpu, tb->tc_ptr);
+ cpu_tb_exec(cpu, tb);
cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
@@ -343,7 +351,6 @@ int cpu_exec(CPUState *cpu)
#endif
int ret, interrupt_request;
TranslationBlock *tb;
- uint8_t *tc_ptr;
uintptr_t next_tb;
SyncClocks sc;
@@ -499,10 +506,6 @@ int cpu_exec(CPUState *cpu)
next_tb = 0;
tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
}
- if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
- tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
- }
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
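The removal above is safe because the same trace now comes out of cpu_tb_exec() itself (first hunk), so the cpu_exec_nocache() path emits it as well. As a usage note rather than part of the patch: the message is gated on the exec bit of the log mask, which can be set with the -d exec command-line flag or programmatically, and one line of output has roughly the following shape (addresses purely illustrative):

    qemu_set_log(CPU_LOG_EXEC);  /* programmatic equivalent of -d exec */
    /* Trace 0x7f6f9b2c0090 [0000000000400550] main */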
@@ -514,10 +517,9 @@ int cpu_exec(CPUState *cpu)
tb_unlock();
if (likely(!cpu->exit_request)) {
trace_exec_tb(tb, tb->pc);
- tc_ptr = tb->tc_ptr;
/* execute the generated code */
cpu->current_tb = tb;
- next_tb = cpu_tb_exec(cpu, tc_ptr);
+ next_tb = cpu_tb_exec(cpu, tb);
cpu->current_tb = NULL;
switch (next_tb & TB_EXIT_MASK) {
case TB_EXIT_REQUESTED:
@@ -379,6 +379,9 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
{
/* NOTE: this test is only needed for thread safety */
if (!tb->jmp_next[n]) {
+ qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p [" TARGET_FMT_lx
+ "] index %d -> %p [" TARGET_FMT_lx "]\n",
+ tb->tc_ptr, tb->pc, n, tb_next->tc_ptr, tb_next->pc);
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
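The new message fires exactly once per link, when slot n of tb is patched for the first time: tb_set_jmp_target() rewrites the direct jump for exit n inside tb's generated code so that it branches straight to tb_next->tc_ptr instead of returning to the main loop. With -d exec enabled, each successful link therefore appears as a line of roughly this shape (addresses illustrative):

    Linking TBs 0x7f6f9b2c0090 [0000000000400550] index 0 -> 0x7f6f9b2c0140 [0000000000400568]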