@@ -95,7 +95,7 @@
struct arm_boot_info;
#define NB_MMU_MODES 7
-#define TARGET_INSN_START_EXTRA_WORDS 1
+#define TARGET_INSN_START_EXTRA_WORDS 2
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -196,6 +196,9 @@ typedef struct CPUARMState {
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
uint64_t daif; /* exception masks, in the bits they are in PSTATE */
+    /* Insn Syndrome state from decoded instructions.
+     * Only valid after Data Aborts.
+     */
+    ARMInsnSyndrome isyn;
+
uint64_t elr_el[4]; /* AArch64 exception link regs */
uint64_t sp_el[4]; /* AArch64 banked stack pointers */
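The ARMInsnSyndrome type added above is declared elsewhere in the series and is not part of this section. As a reading aid, here is a hypothetical sketch of the layout implied by the call sites in this patch, with field meanings taken from the ARMv8 ISS encoding for data aborts (the real declaration may differ):

/* Hypothetical sketch only -- inferred from this patch's call sites. */
typedef struct ARMInsnSyndrome {
    struct {
        bool valid;   /* ISV: the fields below carry valid data */
        unsigned sas; /* Syndrome Access Size: 0=byte .. 3=doubleword */
        bool sse;     /* Syndrome Sign Extend */
        unsigned srt; /* Syndrome Register Transfer: Rt, 0-31 */
        bool sf;      /* Sixty-Four: 64-bit (X) destination register */
        bool ar;      /* Acquire/Release semantics */
    } dabt;           /* data abort state */
} ARMInsnSyndrome;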
@@ -115,8 +115,23 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
exc = EXCP_PREFETCH_ABORT;
} else {
+ if (target_el != 2 || fi.s1ptw) {
+            /* ISV is only set for data aborts routed to EL2 and
+             * never for stage 1 page table walks that fault on
+             * stage 2.
+             *
+             * See the ARMv8 ARM, "ISS encoding for an exception
+             * from a Data Abort", the ISV field.
+             */
+ env->isyn.dabt.valid = 0;
+ }
syn = syn_data_abort(same_el,
- 0, 0, 0, 0, 0, 0,
+ env->isyn.dabt.valid,
+ env->isyn.dabt.sas,
+ env->isyn.dabt.sse,
+ env->isyn.dabt.srt,
+ env->isyn.dabt.sf,
+ env->isyn.dabt.ar,
0, 0, fi.s1ptw, is_write == 1, syn,
env->thumb);
if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
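This hunk assumes syn_data_abort() in internals.h has grown matching ISS arguments. A hedged sketch of what the widened helper could look like, with bit positions from the ARMv8 ESR_ELx ISS encoding for data aborts (the in-tree helper may differ):

/* Sketch, not the actual internals.h helper. */
static inline uint32_t syn_data_abort(int same_el, int isv, int sas, int sse,
                                      int srt, int sf, int ar,
                                      int ea, int cm, int s1ptw, int wnr,
                                      int fsc, bool is_16bit)
{
    /* EC (bits 31:26) is 0x24 for a data abort from a lower EL and 0x25
     * from the same EL; IL (bit 25) is clear only for 16-bit insns.
     */
    return ((uint32_t)(0x24 | same_el) << 26)
           | (is_16bit ? 0 : 1 << 25)
           | (isv << 24) | (sas << 22) | (sse << 21) | (srt << 16)
           | (sf << 15) | (ar << 14)
           | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

For example, an "ldr x1, [x2]" faulting from EL0 to EL2 would produce EC 0x24 with ISV set, SAS 3 (doubleword), SRT 1 and SF 1.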
@@ -163,9 +178,24 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
env->exception.fsr |= (1 << 11);
}
+ if (target_el != 2) {
+        /* ISV is only set for data aborts routed to EL2; no page
+         * table walk is involved on this path.
+         *
+         * See the ARMv8 ARM, "ISS encoding for an exception
+         * from a Data Abort", the ISV field.
+         */
+ env->isyn.dabt.valid = 0;
+ }
raise_exception(env, EXCP_DATA_ABORT,
syn_data_abort(same_el,
- 0, 0, 0, 0, 0, 0,
+ env->isyn.dabt.valid,
+ env->isyn.dabt.sas,
+ env->isyn.dabt.sse,
+ env->isyn.dabt.srt,
+ env->isyn.dabt.sf,
+ env->isyn.dabt.ar,
0, 0, 0, is_write == 1, 0x21,
env->thumb),
target_el);
@@ -1803,6 +1803,24 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
}
#endif
+static void disas_ldr_update_isyn_sse_sf(DisasContext *s, int size,
+ bool is_signed, int opc)
+{
+ int opc0 = extract32(opc, 0, 1);
+ int regsize;
+
+ s->isyn.dabt.sse = is_signed;
+    /* Update the Sixty-Four bit (SF) register size flag. This logic is
+     * derived from the ARMv8 specs for LDR (shared decode for all
+     * encodings): e.g. LDRSB Xt (opc[0] == 0) targets a 64-bit register,
+     * while LDRSB Wt (opc[0] == 1) targets a 32-bit one.
+     */
+ if (is_signed) {
+ regsize = opc0 ? 32 : 64;
+ } else {
+ regsize = size == 3 ? 64 : 32;
+ }
+ s->isyn.dabt.sf = regsize == 64;
+}
+
/* C3.3.6 Load/store exclusive
*
* 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
@@ -1859,6 +1877,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
} else {
do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
}
+
+            /* Generate ISV for non-exclusive accesses, including LASR
+             * (load-acquire/store-release) forms.
+             */
+            s->isyn.dabt.valid = true;
+            s->isyn.dabt.sse = false;
+            s->isyn.dabt.sf = size == 3;
+            s->isyn.dabt.ar = is_lasr;
}
}
@@ -1901,6 +1925,11 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
}
size = 2 + extract32(opc, 0, 1);
is_signed = extract32(opc, 1, 1);
+
+ /* Enable ISV generation. */
+ s->isyn.dabt.valid = true;
+        /* The shared helper also gets LDRSW's 64-bit destination right. */
+        disas_ldr_update_isyn_sse_sf(s, size, is_signed, opc);
}
tcg_rt = cpu_reg(s, rt);
@@ -2119,6 +2148,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
is_store = (opc == 0);
is_signed = extract32(opc, 1, 1);
is_extended = (size < 3) && extract32(opc, 0, 1);
+ disas_ldr_update_isyn_sse_sf(s, size, is_signed, opc);
}
switch (idx) {
@@ -2238,6 +2268,7 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
is_store = (opc == 0);
is_signed = extract32(opc, 1, 1);
is_extended = (size < 3) && extract32(opc, 0, 1);
+ disas_ldr_update_isyn_sse_sf(s, size, is_signed, opc);
}
if (rn == 31) {
@@ -2321,6 +2352,7 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
is_store = (opc == 0);
is_signed = extract32(opc, 1, 1);
is_extended = (size < 3) && extract32(opc, 0, 1);
+ disas_ldr_update_isyn_sse_sf(s, size, is_signed, opc);
}
if (rn == 31) {
@@ -2354,6 +2386,11 @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
bool is_vector = extract32(insn, 26, 1);
int size = extract32(insn, 30, 2);
+    /* Vector loads/stores do not provide an Insn Syndrome. */
+ s->isyn.dabt.valid = !is_vector;
+ s->isyn.dabt.sas = size;
+ s->isyn.dabt.srt = rt;
+
switch (extract32(insn, 24, 2)) {
case 0:
if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
@@ -2660,6 +2697,21 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
/* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
+ uint32_t isyn32;
+
+    /* The Insn Syndrome in AArch64 is only valid for:
+     *   loads/stores of a single GPR (i.e. not vector regs),
+     *   including register 31,
+     *   including acquire/release accesses,
+     *   excluding exclusive accesses,
+     *   excluding accesses with writeback.
+     *
+     * We clear the insn syndrome state here (invalidating it).
+     * The various sub-decoders then fill in the information as
+     * they decode each access.
+     */
+ memset(&s->isyn, 0, sizeof s->isyn);
+
switch (extract32(insn, 24, 6)) {
case 0x08: /* Load/store exclusive */
disas_ldst_excl(s, insn);
@@ -2685,6 +2737,14 @@ static void disas_ldst(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
break;
}
+
+ /* The saved insn syndrome state in insn_start is already zero (invalid).
+ * We only update it if we have a valid insn syndrome to save.
+ */
+ if (s->isyn.dabt.valid) {
+ isyn32 = arm_encode_dabt_isyn_u32(&s->isyn);
+ tcg_set_insn_param(s->insn_start_idx, 2, isyn32);
+ }
}
/* C3.4.6 PC-rel. addressing
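arm_encode_dabt_isyn_u32() above and the arm_decode_dabt_isyn_u32() used by restore_state_to_opc() below are also defined outside this section. One plausible pairing, packing the fields at their ESR_ELx ISS bit positions so the round trip through the insn_start slot is lossless (assumed, not the actual helpers):

/* Hypothetical packing; extract32() is QEMU's qemu/bitops.h helper. */
static inline uint32_t arm_encode_dabt_isyn_u32(const ARMInsnSyndrome *isyn)
{
    return (isyn->dabt.valid << 24)
           | (isyn->dabt.sas << 22)
           | (isyn->dabt.sse << 21)
           | (isyn->dabt.srt << 16)
           | (isyn->dabt.sf << 15)
           | (isyn->dabt.ar << 14);
}

static inline void arm_decode_dabt_isyn_u32(ARMInsnSyndrome *isyn, uint32_t x)
{
    isyn->dabt.valid = extract32(x, 24, 1);
    isyn->dabt.sas = extract32(x, 22, 2);
    isyn->dabt.sse = extract32(x, 21, 1);
    isyn->dabt.srt = extract32(x, 16, 5);
    isyn->dabt.sf = extract32(x, 15, 1);
    isyn->dabt.ar = extract32(x, 14, 1);
}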
@@ -11087,7 +11147,8 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
tcg_clear_temp_count();
do {
- tcg_gen_insn_start(dc->pc, 0);
+ dc->insn_start_idx = tcg_op_buf_count();
+ tcg_gen_insn_start(dc->pc, 0, 0);
num_insns++;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
@@ -11349,7 +11349,8 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
}
do {
tcg_gen_insn_start(dc->pc,
- (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
+ (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
+ 0);
num_insns++;
#ifdef CONFIG_USER_ONLY
@@ -11665,8 +11666,10 @@ void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
if (is_a64(env)) {
env->pc = data[0];
env->condexec_bits = 0;
+ arm_decode_dabt_isyn_u32(&env->isyn, data[2]);
} else {
env->regs[15] = data[0];
env->condexec_bits = data[1];
+ arm_decode_dabt_isyn_u32(&env->isyn, data[2]);
}
}
@@ -58,6 +58,9 @@ typedef struct DisasContext {
bool ss_same_el;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
int c15_cpar;
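+    /* Per-insn syndrome state, filled in by the load/store decoders. */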
+ ARMInsnSyndrome isyn;
+ /* TCG op index of the current insn_start. */
+ int insn_start_idx;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];