@@ -12017,101 +12017,17 @@ static target_ulong arm_tr_translate_insn(DisasContextBase *dcbase,
return dc->pc;
}
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
- DisasContext dc1, *dc = &dc1;
- int max_insns;
-
- /* generate intermediate code */
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
- /* The A64 decoder has its own top level loop, because it doesn't need
- * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
- */
- if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
- gen_intermediate_code_a64(&dc->base, cs, tb);
+ if (dc->base.is_jmp == DISAS_SKIP) {
return;
}
- dc->base.tb = tb;
- dc->base.pc_first = dc->base.tb->pc;
- dc->base.pc_next = dc->base.pc_first;
- dc->base.is_jmp = DISAS_NEXT;
- dc->base.num_insns = 0;
- dc->base.singlestep_enabled = cs->singlestep_enabled;
- arm_tr_init_disas_context(&dc->base, cs);
-
- max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }
-
- gen_tb_start(tb);
-
- tcg_clear_temp_count();
- arm_tr_tb_start(&dc->base, cs);
-
- do {
- dc->base.num_insns++;
- arm_tr_insn_start(&dc->base, cs);
-
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- CPUBreakpoint *bp;
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == dc->base.pc_next) {
- BreakpointCheckType bp_check =
- arm_tr_breakpoint_check(&dc->base, cs, bp);
- switch (bp_check) {
- case BC_MISS:
- /* Target ignored this breakpoint, go to next */
- break;
- case BC_HIT_INSN:
- /* Hit, keep translating */
- /*
- * TODO: if we're never going to have more than one
- * BP in a single address, we can simply use a
- * bool here.
- */
- goto done_breakpoints;
- case BC_HIT_TB:
- /* Hit, end TB */
- goto done_generating;
- default:
- g_assert_not_reached();
- }
- }
- }
- }
- done_breakpoints:
-
- if (dc->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
- gen_io_start();
- }
-
- dc->base.pc_next = arm_tr_translate_insn(&dc->base, cs);
-
- if (tcg_check_temp_count()) {
- fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
- dc->pc);
- }
-
- if (!dc->base.is_jmp && (tcg_op_buf_full() || singlestep ||
- dc->base.num_insns >= max_insns)) {
- dc->base.is_jmp = DISAS_TOO_MANY;
- }
- } while (!dc->base.is_jmp);
-
- if (dc->base.is_jmp != DISAS_SKIP) {
- if (tb->cflags & CF_LAST_IO) {
- if (dc->condjmp) {
- /* FIXME: This can theoretically happen with self-modifying
- code. */
- cpu_abort(cs, "IO on conditional branch instruction");
- }
- gen_io_end();
+ if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying code. */
+ cpu_abort(cpu, "IO on conditional branch instruction");
}
/* At this stage dc->condjmp will only be set when the skipped
@@ -12215,6 +12131,101 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
gen_goto_tb(dc, 1, dc->pc);
}
}
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+ DisasContext dc1, *dc = &dc1;
+ int max_insns;
+
+ /* generate intermediate code */
+
+ /* The A64 decoder has its own top level loop, because it doesn't need
+ * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
+ */
+ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+ gen_intermediate_code_a64(&dc->base, cs, tb);
+ return;
+ }
+
+ dc->base.tb = tb;
+ dc->base.pc_first = dc->base.tb->pc;
+ dc->base.pc_next = dc->base.pc_first;
+ dc->base.is_jmp = DISAS_NEXT;
+ dc->base.num_insns = 0;
+ dc->base.singlestep_enabled = cs->singlestep_enabled;
+ arm_tr_init_disas_context(&dc->base, cs);
+
+
+ arm_tr_init_globals(&dc->base, cs);
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+
+ tcg_clear_temp_count();
+ arm_tr_tb_start(&dc->base, cs);
+
+ do {
+ dc->base.num_insns++;
+ arm_tr_insn_start(&dc->base, cs);
+
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ CPUBreakpoint *bp;
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ if (bp->pc == dc->base.pc_next) {
+ BreakpointCheckType bp_check =
+ arm_tr_breakpoint_check(&dc->base, cs, bp);
+ switch (bp_check) {
+ case BC_MISS:
+ /* Target ignored this breakpoint, go to next */
+ break;
+ case BC_HIT_INSN:
+ /* Hit, keep translating */
+ /*
+ * TODO: if we're never going to have more than one
+ * BP in a single address, we can simply use a
+ * bool here.
+ */
+ goto done_breakpoints;
+ case BC_HIT_TB:
+ /* Hit, end TB */
+ goto done_generating;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ }
+ }
+ done_breakpoints:
+
+ if (dc->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ dc->base.pc_next = arm_tr_translate_insn(&dc->base, cs);
+
+ if (tcg_check_temp_count()) {
+ fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
+ dc->pc);
+ }
+
+ if (!dc->base.is_jmp && (tcg_op_buf_full() || singlestep ||
+ dc->base.num_insns >= max_insns)) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+ } while (!dc->base.is_jmp);
+
+ arm_tr_tb_stop(&dc->base, cs);
+
+ if (dc->base.tb->cflags & CF_LAST_IO) {
+ gen_io_end();
}
done_generating:
Incrementally paves the way towards using the generic instruction
translation loop.

Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
 target/arm/translate.c | 191 +++++++++++++++++++++++++-----------------------
 1 file changed, 101 insertions(+), 90 deletions(-)