@@ -45,11 +45,6 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- return 0;
- }
-
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
}
@@ -62,6 +57,15 @@ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
return 0;
}

+/*
+ * Check whether a PMP is locked for writing or not.
+ * (i.e. has LOCK flag and mseccfg.RLB is unset)
+ */
+static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
+{
+ return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
+}
+
/*
* Count the number of active rules.
*/
@@ -90,39 +94,40 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
- bool locked = true;
+ bool readonly = true;

if (riscv_cpu_cfg(env)->ext_smepmp) {
/* mseccfg.RLB is set */
if (MSECCFG_RLB_ISSET(env)) {
- locked = false;
+ readonly = false;
}

/* mseccfg.MML is not set */
- if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
- locked = false;
+ if (!MSECCFG_MML_ISSET(env) && !pmp_is_readonly(env, pmp_index)) {
+ readonly = false;
}

/* mseccfg.MML is set */
if (MSECCFG_MML_ISSET(env)) {
/* not adding execute bit */
if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
- locked = false;
+ readonly = false;
}
/* shared region and not adding X bit */
if ((val & PMP_LOCK) != PMP_LOCK &&
(val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
- locked = false;
+ readonly = false;
}
}
} else {
- if (!pmp_is_locked(env, pmp_index)) {
- locked = false;
+ if (!pmp_is_readonly(env, pmp_index)) {
+ readonly = false;
}
}

- if (locked) {
- qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
+ if (readonly) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - read only\n");
} else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
/* If !mseccfg.MML then ignore writes with encoding RW=01 */
if ((val & PMP_WRITE) && !(val & PMP_READ) &&
@@ -524,14 +529,14 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

- if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
+ if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ "ignoring pmpaddr write - pmpcfg+1 read only\n");
return;
}
}

- if (!pmp_is_locked(env, addr_index)) {
+ if (!pmp_is_readonly(env, addr_index)) {
if (env->pmp_state.pmp[addr_index].addr_reg != val) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule_addr(env, addr_index);
@@ -542,7 +547,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - locked\n");
+ "ignoring pmpaddr write - read only\n");
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
When Smepmp is supported, RLB allows bypassing locks when writing CSRs
but should not affect the interpretation of the actual PMP rules.

pmp_is_locked is changed to only check the LOCK bit, and a new
pmp_is_readonly function is added that checks both the LOCK bit and
mseccfg.RLB. pmp_write_cfg and pmpaddr_csr_write are changed to use
pmp_is_readonly, while pmp_hart_has_privs keeps using pmp_is_locked.

Signed-off-by: Loïc Lefort <loic@rivosinc.com>
---
 target/riscv/pmp.c | 43 ++++++++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 19 deletions(-)
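
As an aside for readers less familiar with Smepmp, the following standalone
sketch (not part of the patch, and using made-up names such as toy_env,
TOY_PMP_LOCK, toy_pmp_is_locked and toy_pmp_is_readonly) illustrates the
distinction the commit message draws: with mseccfg.RLB set, a locked entry
can still be rewritten through its CSRs, while rule evaluation continues to
treat it as locked.

/*
 * Illustrative sketch only: a toy model of the lock vs. read-only split
 * described above. The names and structures here are hypothetical and
 * only mirror the shape of the QEMU helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PMP_LOCK 0x80u              /* L bit of a pmpcfg entry */

struct toy_env {
    uint8_t pmpcfg;                     /* a single pmpcfg entry */
    bool mseccfg_rlb;                   /* mseccfg.RLB (Rule Locking Bypass) */
};

/* Rule evaluation only looks at the L bit (as pmp_hart_has_privs does). */
static bool toy_pmp_is_locked(const struct toy_env *env)
{
    return (env->pmpcfg & TOY_PMP_LOCK) != 0;
}

/* CSR writes additionally honour RLB (as pmp_write_cfg now does). */
static bool toy_pmp_is_readonly(const struct toy_env *env)
{
    return toy_pmp_is_locked(env) && !env->mseccfg_rlb;
}

int main(void)
{
    struct toy_env env = { .pmpcfg = TOY_PMP_LOCK, .mseccfg_rlb = true };

    /* RLB set: the locked entry may still be rewritten via its CSRs... */
    printf("CSR writable: %s\n", toy_pmp_is_readonly(&env) ? "no" : "yes");

    /* ...but the rule itself is still treated as locked for access checks. */
    printf("rule locked:  %s\n", toy_pmp_is_locked(&env) ? "yes" : "no");

    return 0;
}

Running this prints that the entry is still writable through its CSRs yet
remains locked for rule evaluation, which matches the split the patch makes
between pmp_write_cfg/pmpaddr_csr_write and pmp_hart_has_privs.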