@@ -237,6 +237,13 @@ static inline int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
return 0;
}
+static inline int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
+					   phys_addr_t fault_ipa,
+					   struct kvm_s2_trans *trans)
+{
+	return 0;
+}
+
static inline void kvm_nested_s2_unmap(struct kvm_vcpu *vcpu) { }
static inline void kvm_nested_s2_free(struct kvm *kvm) { }
static inline void kvm_nested_s2_wp(struct kvm *kvm) { }
@@ -337,6 +337,8 @@ struct kvm_s2_trans {
void update_nested_s2_mmu(struct kvm_vcpu *vcpu);
int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
struct kvm_s2_trans *result);
+int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+			     struct kvm_s2_trans *trans);
void kvm_nested_s2_unmap(struct kvm_vcpu *vcpu);
void kvm_nested_s2_free(struct kvm *kvm);
void kvm_nested_s2_wp(struct kvm *kvm);
@@ -271,6 +271,34 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
return walk_nested_s2_pgd(vcpu, gipa, &wi, result);
}
+/*
+ * Returns non-zero if the permission fault should be forwarded to the
+ * guest hypervisor; the syndrome to inject is left in @trans->esr and
+ * the actual injection is done by the caller.
+ */
+int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+			     struct kvm_s2_trans *trans)
+{
+	unsigned long fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
+	bool write_fault = kvm_is_write_fault(vcpu);
+
+	/* Only permission faults depend on the guest hypervisor's S2. */
+	if (fault_status != FSC_PERM)
+		return 0;
+
+	/*
+	 * The guest hypervisor's stage 2 does not allow this access, so
+	 * the fault must be reflected back to it.
+	 */
+	if ((write_fault && !trans->writable) ||
+	    (!write_fault && !trans->readable)) {
+		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
+		return 1;
+	}
+
+	return 0;
+}
+
/* expects kvm->mmu_lock to be held */
void kvm_nested_s2_wp(struct kvm *kvm)
{
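
The esr_s2_fault() helper used above is not defined in this excerpt. As a
minimal sketch of the assumed behaviour: reuse the syndrome of the abort that
was just taken, replacing only the fault status code and the lookup level, so
that the guest hypervisor sees a permission fault at the level its own stage 2
tables denied. The kvm_vcpu_get_esr() accessor and the ESR_ELx_FSC mask are
assumptions here, not taken from this diff:

static u32 esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
{
	u32 esr;

	/* Keep the original syndrome (EC, WnR, etc.), drop the old FSC. */
	esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
	/* Substitute the new fault status code, e.g. ESR_ELx_FSC_PERM. */
	esr |= fsc;
	/* Encode the lookup level in FSC[1:0]. */
	esr |= level & 0x3;

	return esr;
}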
@@ -1591,6 +1591,18 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (ret)
goto out_unlock;
+		/*
+		 * The walk succeeded, but the guest hypervisor's stage 2
+		 * permissions may still forbid the access. Clear any stale
+		 * syndrome before checking, and reflect the fault if so.
+		 */
+		nested_trans.esr = 0;
+		ret = kvm_s2_handle_perm_fault(vcpu, fault_ipa, &nested_trans);
+		if (nested_trans.esr)
+			kvm_inject_s2_fault(vcpu, nested_trans.esr);
+		if (ret)
+			goto out_unlock;
+
ipa = nested_trans.output;
}
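
kvm_inject_s2_fault() is likewise outside this excerpt. One plausible shape,
assuming the captured fault address registers are made visible to the virtual
EL2 before a synchronous abort with the prepared syndrome is injected; the
kvm_inject_nested_sync() helper and the vcpu->arch.fault fields are
assumptions, not shown in this diff:

static int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
{
	/* Expose the faulting addresses to the guest hypervisor. */
	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);

	/* Enter virtual EL2 with a synchronous exception. */
	return kvm_inject_nested_sync(vcpu, esr_el2);
}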