@@ -102,10 +102,10 @@ typedef struct CPURISCVState CPURISCVState;
#define RV_VLEN_MAX 256
-FIELD(VTYPE, VLMUL, 0, 2)
-FIELD(VTYPE, VSEW, 2, 3)
-FIELD(VTYPE, VEDIV, 5, 2)
-FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
+FIELD(VTYPE, VLMUL, 0, 3)
+FIELD(VTYPE, VSEW, 3, 3)
+FIELD(VTYPE, VEDIV, 8, 2)
+FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
struct CPURISCVState {
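A quick way to sanity-check the new rvv-1.0 vtype layout is to decode a raw vtype value by hand: vlmul now sits in bits [2:0] and vsew in bits [5:3], with SEW = 8 << vsew and LMUL = 2^(signed vlmul). The sketch below is not part of the patch; the names are local to the example.

#include <stdint.h>
#include <stdio.h>

/* Decode a raw rvv-1.0 vtype value using the bit positions above.
 * Purely illustrative; names are local to this sketch, not QEMU's. */
static void decode_vtype(uint32_t vtype)
{
    unsigned vlmul = vtype & 0x7;         /* VTYPE[2:0] */
    unsigned vsew  = (vtype >> 3) & 0x7;  /* VTYPE[5:3] */
    /* Interpret vlmul as a signed 3-bit value. */
    int lmul = vlmul >= 4 ? (int)vlmul - 8 : (int)vlmul;

    printf("SEW = %u bits, lmul = %d (LMUL = 2^lmul)\n", 8u << vsew, lmul);
}

int main(void)
{
    decode_vtype(0x0d);  /* vsew = 001 (SEW = 16), vlmul = 101 (LMUL = 1/8) */
    return 0;
}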
@@ -403,18 +403,20 @@ typedef RISCVCPU ArchCPU;
#include "exec/cpu-all.h"
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
-FIELD(TB_FLAGS, VL_EQ_VLMAX, 3, 1)
-FIELD(TB_FLAGS, LMUL, 4, 2)
+FIELD(TB_FLAGS, LMUL, 3, 3)
FIELD(TB_FLAGS, SEW, 6, 3)
-FIELD(TB_FLAGS, VILL, 9, 1)
+/* Skip MSTATUS_VS (0x600) bits */
+FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
+FIELD(TB_FLAGS, VILL, 12, 1)
+/* Skip MSTATUS_FS (0x6000) bits */
/* Is a Hypervisor instruction load/store allowed? */
-FIELD(TB_FLAGS, HLSX, 10, 1)
-FIELD(TB_FLAGS, MSTATUS_HS_FS, 11, 2)
-FIELD(TB_FLAGS, MSTATUS_HS_VS, 13, 2)
+FIELD(TB_FLAGS, HLSX, 15, 1)
+FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2)
+FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
-FIELD(TB_FLAGS, XL, 15, 2)
+FIELD(TB_FLAGS, XL, 20, 2)
/* If PointerMasking should be applied */
-FIELD(TB_FLAGS, PM_ENABLED, 17, 1)
+FIELD(TB_FLAGS, PM_ENABLED, 22, 1)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
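The two "Skip ..." comments reserve tb_flags bits 9-10 and 13-14 so that they line up with the mstatus VS (0x600) and FS (0x6000) fields. A throwaway check like the one below (not QEMU code; the mask values are taken from the comments) confirms the arithmetic: VL_EQ_VLMAX and VILL land at bits 11 and 12, between the two gaps, and HLSX starts at bit 15, just above mstatus.FS.

#include <assert.h>

/* Mask values quoted in the comments above; local copies for this check. */
#define MSTATUS_VS 0x600   /* mstatus.VS, bits 9-10  */
#define MSTATUS_FS 0x6000  /* mstatus.FS, bits 13-14 */

int main(void)
{
    unsigned vl_eq_vlmax = 1u << 11;
    unsigned vill        = 1u << 12;
    unsigned hlsx        = 1u << 15;

    /* The skipped ranges are exactly the mstatus field positions... */
    assert(MSTATUS_VS == (3u << 9));
    assert(MSTATUS_FS == (3u << 13));

    /* ...and none of the relocated TB_FLAGS fields collide with them. */
    assert(((vl_eq_vlmax | vill | hlsx) & (MSTATUS_VS | MSTATUS_FS)) == 0);
    return 0;
}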
@@ -80,7 +80,19 @@ typedef struct DisasContext {
bool hlsx;
/* vector extension */
bool vill;
- uint8_t lmul;
+ /*
+ * Encode LMUL to lmul as follows:
+ *   LMUL   vlmul   lmul
+ *    1      000      0
+ *    2      001      1
+ *    4      010      2
+ *    8      011      3
+ *    -      100      -
+ *   1/8     101     -3
+ *   1/4     110     -2
+ *   1/2     111     -1
+ */
+ int8_t lmul;
uint8_t sew;
uint16_t vlen;
bool vl_eq_vlmax;
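With lmul stored as a signed power of two, derived quantities reduce to shifts. For example, the maximum number of elements per operation is VLMAX = VLEN * LMUL / SEW, which with SEW = 8 << vsew and LMUL = 2^lmul collapses to VLEN >> (3 + vsew - lmul). The sketch below is not part of the patch and uses illustrative names; it just works through a few cases of the table above.

#include <stdio.h>

/* VLMAX = (VLEN / SEW) * LMUL as a single shift; illustrative only. */
static int vlmax(int vlen, int vsew, int lmul)
{
    return vlen >> (3 + vsew - lmul);
}

int main(void)
{
    printf("%d\n", vlmax(256, 0, 0));   /* SEW=8,  LMUL=1   -> 32 elements */
    printf("%d\n", vlmax(256, 2, 3));   /* SEW=32, LMUL=8   -> 64 elements */
    printf("%d\n", vlmax(256, 1, -2));  /* SEW=16, LMUL=1/4 ->  4 elements */
    return 0;
}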
@@ -690,7 +702,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
- ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
+ ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->cs = cs;
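sextract32() pulls out the low 3 bits of the LMUL field and sign-extends them, which is what turns the hardware vlmul encoding into the signed lmul of the table above. If sextract32 is unfamiliar, the standalone equivalent below (a sketch, not QEMU's implementation) shows the effect on the fractional encodings.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for sextract32(value, 0, 3): take the low 3 bits
 * and sign-extend them. */
static int32_t sext3(uint32_t value)
{
    uint32_t field = value & 0x7;
    return field & 0x4 ? (int32_t)field - 8 : (int32_t)field;
}

int main(void)
{
    printf("%d\n", sext3(0x3)); /* 011 ->  3 (LMUL = 8)   */
    printf("%d\n", sext3(0x5)); /* 101 -> -3 (LMUL = 1/8) */
    printf("%d\n", sext3(0x7)); /* 111 -> -1 (LMUL = 1/2) */
    return 0;
}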
@@ -86,9 +86,21 @@ static inline uint32_t vext_vm(uint32_t desc)
return FIELD_EX32(simd_data(desc), VDATA, VM);
}
-static inline uint32_t vext_lmul(uint32_t desc)
+/*
+ * Encode LMUL to lmul as follows:
+ *   LMUL   vlmul   lmul
+ *    1      000      0
+ *    2      001      1
+ *    4      010      2
+ *    8      011      3
+ *    -      100      -
+ *   1/8     101     -3
+ *   1/4     110     -2
+ *   1/2     111     -1
+ */
+static inline int32_t vext_lmul(uint32_t desc)
{
- return FIELD_EX32(simd_data(desc), VDATA, LMUL);
+ return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
}
static uint32_t vext_wd(uint32_t desc)
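Helpers that need the actual multiplier can recover it from the signed value returned by vext_lmul() with one more shift; for instance, the bytes covered by one register group could be computed as below. This is only a sketch of the idea under the encoding above, not code from this series, and group_bytes/vlenb are hypothetical names.

#include <stdint.h>

/* Hypothetical helper: bytes covered by one register group, given
 * vlenb (VLEN/8 in bytes) and the signed lmul from vext_lmul(). */
static inline uint32_t group_bytes(uint32_t vlenb, int32_t lmul)
{
    /* LMUL = 2^lmul, so scale vlenb up or down by |lmul| shifts. */
    return lmul >= 0 ? vlenb << lmul : vlenb >> -lmul;
}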