@@ -209,6 +209,7 @@ struct CPURISCVState {
uint64_t htimedelta;
/* Hypervisor controlled virtual interrupt priorities */
+ target_ulong hvictl;
uint8_t hviprio[64];
/* Upper 64-bits of 128-bit CSRs */
@@ -512,6 +513,7 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
return env->misa_mxl;
}
#endif
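+/* Effective machine XLEN in bits: MXL_RV32 -> 32, MXL_RV64 -> 64, MXL_RV128 -> 128 */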
+#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
@@ -234,6 +234,15 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
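+/* Predicate for AIA-only hypervisor CSRs: require the AIA feature, then apply the usual H-mode check */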
+static int aia_hmode(CPURISCVState *env, int csrno)
+{
+ if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
static int aia_hmode32(CPURISCVState *env, int csrno)
{
if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
@@ -1142,6 +1151,9 @@ static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
if (riscv_cpu_virt_enabled(env)) {
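+ /* With hvictl.VTI set, guest (VS-mode) accesses to sie raise a virtual instruction fault */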
+ if (env->hvictl & HVICTL_VTI) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
} else {
ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
@@ -1355,6 +1367,9 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
uint64_t mask = env->mideleg & sip_writable_mask;
if (riscv_cpu_virt_enabled(env)) {
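+ /* With hvictl.VTI set, guest (VS-mode) accesses to sip raise a virtual instruction fault */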
+ if (env->hvictl & HVICTL_VTI) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
} else {
ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
@@ -1741,6 +1756,110 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->hvictl;
+ return RISCV_EXCP_NONE;
+}
+
+static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->hvictl = val & HVICTL_VALID_MASK;
+ return RISCV_EXCP_NONE;
+}
+
+static int read_hvipriox(CPURISCVState *env, int first_index,
+ uint8_t *iprio, target_ulong *val)
+{
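+ /* Each XLEN-bit hviprio CSR packs one 8-bit priority per byte: 4 irqs on RV32, 8 on RV64 */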
+ int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
+
+ /* First index has to be a multiple of the number of irqs per register */
+ if (first_index % num_irqs) {
+ return (riscv_cpu_virt_enabled(env)) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ /* Fill up the return value */
+ *val = 0;
+ for (i = 0; i < num_irqs; i++) {
+ if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
+ continue;
+ }
+ if (rdzero) {
+ continue;
+ }
+ *val |= ((target_ulong)iprio[irq]) << (i * 8);
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static int write_hvipriox(CPURISCVState *env, int first_index,
+ uint8_t *iprio, target_ulong val)
+{
+ int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
+
+ /* First index has to be a multiple of the number of irqs per register */
+ if (first_index % num_irqs) {
+ return (riscv_cpu_virt_enabled(env)) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ /* Fill up the priority array */
+ for (i = 0; i < num_irqs; i++) {
+ if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
+ continue;
+ }
+ if (rdzero) {
+ iprio[irq] = 0;
+ } else {
+ iprio[irq] = (val >> (i * 8)) & 0xff;
+ }
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
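+/*
+ * hviprio1 covers the priority bytes starting at index 0, hviprio1h at 4,
+ * hviprio2 at 8 and hviprio2h at 12; the *h high halves are RV32-only.
+ */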
+static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ return read_hvipriox(env, 0, env->hviprio, val);
+}
+
+static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
+{
+ return write_hvipriox(env, 0, env->hviprio, val);
+}
+
+static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ return read_hvipriox(env, 4, env->hviprio, val);
+}
+
+static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
+{
+ return write_hvipriox(env, 4, env->hviprio, val);
+}
+
+static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ return read_hvipriox(env, 8, env->hviprio, val);
+}
+
+static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
+{
+ return write_hvipriox(env, 8, env->hviprio, val);
+}
+
+static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ return read_hvipriox(env, 12, env->hviprio, val);
+}
+
+static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
+{
+ return write_hvipriox(env, 12, env->hviprio, val);
+}
+
/* Virtual CSR Registers */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
target_ulong *val)
@@ -2534,9 +2653,16 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 },
[CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst },
+ /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
+ [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl },
+ [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
+ [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },
+
/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
[CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
[CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
+ [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h },
+ [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h },
[CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
[CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
@@ -92,6 +92,8 @@ static const VMStateDescription vmstate_hyper = {
VMSTATE_UINTTL(env.hgeie, RISCVCPU),
VMSTATE_UINTTL(env.hgeip, RISCVCPU),
VMSTATE_UINT64(env.htimedelta, RISCVCPU),
+
+ VMSTATE_UINTTL(env.hvictl, RISCVCPU),
VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),
VMSTATE_UINT64(env.vsstatus, RISCVCPU),