Commit MetaInfo

Revision: 51b061fbf027f844cc0bfb1eaaf0ae818e3ceb36
Time: 2017-09-08 03:23:13
Author: Richard Henderson <rth@twid...>
Committer: Richard Henderson

Log Message

target/hppa: Convert to TranslatorOps

Signed-off-by: Richard Henderson <rth@twiddle.net>
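
For orientation, TranslatorOps is the per-target hook table consumed by the common translator loop in accel/tcg/translator.c. That loop takes over the bookkeeping the hand-written loop below used to do itself: instruction counting, gen_tb_start()/gen_tb_end(), setting tb->size and tb->icount, CF_LAST_IO handling, and the DEBUG_DISAS dump. The following is only a simplified, abridged sketch of how the generic loop drives the hooks registered in hppa_tr_ops; it is not the exact upstream code.

/* Abridged sketch of the generic loop in accel/tcg/translator.c.
   Breakpoint handling and several details are omitted.  */
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                     CPUState *cpu, TranslationBlock *tb)
{
    int max_insns;

    /* Common setup previously open-coded in gen_intermediate_code.  */
    db->tb = tb;
    db->pc_first = tb->pc;
    db->pc_next = db->pc_first;
    db->is_jmp = DISAS_NEXT;
    db->num_insns = 0;
    db->singlestep_enabled = cpu->singlestep_enabled;

    /* Bound by icount and single-stepping, then let the target tighten
       the bound (hppa clamps to the insns left on the current page).  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (db->singlestep_enabled || singlestep) {
        max_insns = 1;
    }
    max_insns = ops->init_disas_context(db, cpu, max_insns);

    gen_tb_start(db->tb);
    ops->tb_start(db, cpu);

    while (true) {
        db->num_insns++;
        ops->insn_start(db, cpu);
        /* Breakpoints at db->pc_next are reported via ops->breakpoint_check.  */
        if (db->num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        ops->translate_insn(db, cpu);   /* sets db->is_jmp and db->pc_next */
        if (db->is_jmp != DISAS_NEXT) {
            break;
        }
        if (db->num_insns >= max_insns || tcg_op_buf_full()) {
            db->is_jmp = DISAS_TOO_MANY;
            break;
        }
    }

    ops->tb_stop(db, cpu);
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    gen_tb_end(db->tb, db->num_insns);
    tb->size = db->pc_next - db->pc_first;
    tb->icount = db->num_insns;
    /* ops->disas_log is invoked here for the -d in_asm debug dump.  */
}

Seen through this sketch, the new DISAS_TOO_MANY case in hppa_tr_tb_stop() roughly covers the path where the loop itself stops at max_insns or a full op buffer, conditions the old loop folded into its own DISAS_IAQ_N_STALE handling.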

Change Summary

    target/hppa/translate.c

Incremental Difference

--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -3729,185 +3729,201 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
     return gen_illegal(ctx);
 }

-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
+                                      CPUState *cs, int max_insns)
 {
-    CPUHPPAState *env = cs->env_ptr;
-    DisasContext ctx;
-    DisasJumpType ret;
-    int num_insns, max_insns, i;
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    TranslationBlock *tb = ctx->base.tb;
+    int i, bound;

-    ctx.base.tb = tb;
-    ctx.base.singlestep_enabled = cs->singlestep_enabled;
-    ctx.cs = cs;
-    ctx.iaoq_f = tb->pc;
-    ctx.iaoq_b = tb->cs_base;
+    ctx->cs = cs;
+    ctx->iaoq_f = tb->pc;
+    ctx->iaoq_b = tb->cs_base;
+    ctx->iaoq_n = -1;
+    TCGV_UNUSED(ctx->iaoq_n_var);

-    ctx.ntemps = 0;
-    for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
-        TCGV_UNUSED(ctx.temps[i]);
+    ctx->ntemps = 0;
+    for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
+        TCGV_UNUSED(ctx->temps[i]);
     }

-    /* Compute the maximum number of insns to execute, as bounded by
-       (1) icount, (2) single-stepping, (3) branch delay slots, or
-       (4) the number of insns remaining on the current page. */
-    max_insns = tb->cflags & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    if (ctx.base.singlestep_enabled || singlestep) {
-        max_insns = 1;
-    } else if (max_insns > TCG_MAX_INSNS) {
-        max_insns = TCG_MAX_INSNS;
-    }
+    bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
+    return MIN(max_insns, bound);
+}

-    num_insns = 0;
-    gen_tb_start(tb);
+static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);

     /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
-    ctx.null_cond = cond_make_f();
-    ctx.psw_n_nonzero = false;
-    if (tb->flags & 1) {
-        ctx.null_cond.c = TCG_COND_ALWAYS;
-        ctx.psw_n_nonzero = true;
+    ctx->null_cond = cond_make_f();
+    ctx->psw_n_nonzero = false;
+    if (ctx->base.tb->flags & 1) {
+        ctx->null_cond.c = TCG_COND_ALWAYS;
+        ctx->psw_n_nonzero = true;
     }
-    ctx.null_lab = NULL;
+    ctx->null_lab = NULL;
+}

-    do {
-        tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
-        num_insns++;
+static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);

-        if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
-            ret = gen_excp(&ctx, EXCP_DEBUG);
-            break;
-        }
-        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
-            gen_io_start();
-        }
+    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
+}
+
+static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
+                                     const CPUBreakpoint *bp)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);

-        if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
-            ret = do_page_zero(&ctx);
-            assert(ret != DISAS_NEXT);
+    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
+    ctx->base.pc_next = ctx->iaoq_f + 4;
+    return true;
+}
+
+static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    CPUHPPAState *env = cs->env_ptr;
+    DisasJumpType ret;
+    int i, n;
+
+    /* Execute one insn. */
+    if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
+        ret = do_page_zero(ctx);
+        assert(ret != DISAS_NEXT);
+    } else {
+        /* Always fetch the insn, even if nullified, so that we check
+           the page permissions for execute. */
+        uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
+
+        /* Set up the IA queue for the next insn.
+           This will be overwritten by a branch. */
+        if (ctx->iaoq_b == -1) {
+            ctx->iaoq_n = -1;
+            ctx->iaoq_n_var = get_temp(ctx);
+            tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
         } else {
-            /* Always fetch the insn, even if nullified, so that we check
-               the page permissions for execute. */
-            uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);
-
-            /* Set up the IA queue for the next insn.
-               This will be overwritten by a branch. */
-            if (ctx.iaoq_b == -1) {
-                ctx.iaoq_n = -1;
-                ctx.iaoq_n_var = get_temp(&ctx);
-                tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
-            } else {
-                ctx.iaoq_n = ctx.iaoq_b + 4;
-                TCGV_UNUSED(ctx.iaoq_n_var);
-            }
-
-            if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
-                ctx.null_cond.c = TCG_COND_NEVER;
-                ret = DISAS_NEXT;
-            } else {
-                ret = translate_one(&ctx, insn);
-                assert(ctx.null_lab == NULL);
-            }
+            ctx->iaoq_n = ctx->iaoq_b + 4;
+            TCGV_UNUSED(ctx->iaoq_n_var);
         }

-        for (i = 0; i < ctx.ntemps; ++i) {
-            tcg_temp_free(ctx.temps[i]);
-            TCGV_UNUSED(ctx.temps[i]);
-        }
-        ctx.ntemps = 0;
-
-        /* If we see non-linear instructions, exhaust instruction count,
-           or run out of buffer space, stop generation. */
-        /* ??? The non-linear instruction restriction is purely due to
-           the debugging dump. Otherwise we *could* follow unconditional
-           branches within the same page. */
-        if (ret == DISAS_NEXT
-            && (ctx.iaoq_b != ctx.iaoq_f + 4
-                || num_insns >= max_insns
-                || tcg_op_buf_full())) {
-            if (ctx.null_cond.c == TCG_COND_NEVER
-                || ctx.null_cond.c == TCG_COND_ALWAYS) {
-                nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
-                gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
-                ret = DISAS_NORETURN;
-            } else {
-                ret = DISAS_IAQ_N_STALE;
-            }
+        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
+            ctx->null_cond.c = TCG_COND_NEVER;
+            ret = DISAS_NEXT;
+        } else {
+            ret = translate_one(ctx, insn);
+            assert(ctx->null_lab == NULL);
         }
+    }

-        ctx.iaoq_f = ctx.iaoq_b;
-        ctx.iaoq_b = ctx.iaoq_n;
-        if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
-            break;
-        }
-        if (ctx.iaoq_f == -1) {
-            tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
-            copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
-            nullify_save(&ctx);
-            ret = DISAS_IAQ_N_UPDATED;
-            break;
-        }
-        if (ctx.iaoq_b == -1) {
-            tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
-        }
-    } while (ret == DISAS_NEXT);
+    /* Free any temporaries allocated. */
+    for (i = 0, n = ctx->ntemps; i < n; ++i) {
+        tcg_temp_free(ctx->temps[i]);
+        TCGV_UNUSED(ctx->temps[i]);
+    }
+    ctx->ntemps = 0;

-    if (tb->cflags & CF_LAST_IO) {
-        gen_io_end();
+    /* Advance the insn queue. */
+    /* ??? The non-linear instruction restriction is purely due to
+       the debugging dump. Otherwise we *could* follow unconditional
+       branches within the same page. */
+    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
+        if (ctx->null_cond.c == TCG_COND_NEVER
+            || ctx->null_cond.c == TCG_COND_ALWAYS) {
+            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
+            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
+            ret = DISAS_NORETURN;
+        } else {
+            ret = DISAS_IAQ_N_STALE;
+        }
     }
+    ctx->iaoq_f = ctx->iaoq_b;
+    ctx->iaoq_b = ctx->iaoq_n;
+    ctx->base.is_jmp = ret;
+
+    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
+        return;
+    }
+    if (ctx->iaoq_f == -1) {
+        tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
+        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+        nullify_save(ctx);
+        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
+    } else if (ctx->iaoq_b == -1) {
+        tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
+    }
+}
+
+static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);

-    switch (ret) {
+    switch (ctx->base.is_jmp) {
     case DISAS_NORETURN:
         break;
+    case DISAS_TOO_MANY:
     case DISAS_IAQ_N_STALE:
-        copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
-        copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
-        nullify_save(&ctx);
+        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
+        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+        nullify_save(ctx);
         /* FALLTHRU */
     case DISAS_IAQ_N_UPDATED:
-        if (ctx.base.singlestep_enabled) {
+        if (ctx->base.singlestep_enabled) {
             gen_excp_1(EXCP_DEBUG);
         } else {
             tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
         }
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }

-    gen_tb_end(tb, num_insns);
+    /* We don't actually use this during normal translation,
+       but we should interact with the generic main loop. */
+    ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
+}

-    tb->size = num_insns * 4;
-    tb->icount = num_insns;
+static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+    TranslationBlock *tb = dcbase->tb;

-#ifdef DEBUG_DISAS
-    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
-        && qemu_log_in_addr_range(tb->pc)) {
-        qemu_log_lock();
-        switch (tb->pc) {
-        case 0x00:
-            qemu_log("IN:\n0x00000000: (null)\n\n");
-            break;
-        case 0xb0:
-            qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
-            break;
-        case 0xe0:
-            qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
-            break;
-        case 0x100:
-            qemu_log("IN:\n0x00000100: syscall\n\n");
-            break;
-        default:
-            qemu_log("IN: %s\n", lookup_symbol(tb->pc));
-            log_target_disas(cs, tb->pc, tb->size, 1);
-            qemu_log("\n");
-            break;
-        }
-        qemu_log_unlock();
+    switch (tb->pc) {
+    case 0x00:
+        qemu_log("IN:\n0x00000000: (null)\n");
+        break;
+    case 0xb0:
+        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
+        break;
+    case 0xe0:
+        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
+        break;
+    case 0x100:
+        qemu_log("IN:\n0x00000100: syscall\n");
+        break;
+    default:
+        qemu_log("IN: %s\n", lookup_symbol(tb->pc));
+        log_target_disas(cs, tb->pc, tb->size, 1);
+        break;
     }
-#endif
+}
+
+static const TranslatorOps hppa_tr_ops = {
+    .init_disas_context = hppa_tr_init_disas_context,
+    .tb_start           = hppa_tr_tb_start,
+    .insn_start         = hppa_tr_insn_start,
+    .breakpoint_check   = hppa_tr_breakpoint_check,
+    .translate_insn     = hppa_tr_translate_insn,
+    .tb_stop            = hppa_tr_tb_stop,
+    .disas_log          = hppa_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+
+{
+    DisasContext ctx;
+    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
 }

 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,