@@ -3378,6 +3378,21 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
if ((error_code & PFERR_WRITE_MASK) &&
spte_can_locklessly_be_made_writable(spte))
{
+ /*
+ * Record a write-protect fault caused by
+ * Sub-Page Protection (SPP).
+ */
+ if (spte & PT_SPP_MASK) {
+ fault_handled = true;
+
+ vcpu->run->exit_reason = KVM_EXIT_SPP;
+ vcpu->run->spp.addr = gva;
+ kvm_skip_emulated_instruction(vcpu);
+
+ /* Let QEMU decide how to handle this. */
+ break;
+ }
+
new_spte |= PT_WRITABLE_MASK;
/*
@@ -5343,6 +5358,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
r = vcpu->arch.mmu->page_fault(vcpu, cr2,
lower_32_bits(error_code),
false);
+
+ if (vcpu->run->exit_reason == KVM_EXIT_SPP)
+ return 0;
+
WARN_ON(r == RET_PF_INVALID);
}
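
Taken together, the two mmu.c hunks work like this: when a write faults on an SPTE that has the SPP bit set, fast_page_fault() fills vcpu->run with KVM_EXIT_SPP and the faulting address instead of making the SPTE writable, and kvm_mmu_page_fault() then returns 0, which in KVM's x86 convention means "complete this exit in userspace" rather than "resume the guest". The predicate being added is small enough to pull out on its own. Below is a minimal, standalone sketch of it, not part of the patch: PFERR_WRITE_MASK is assumed to mirror KVM's page-fault error-code bit 1, and the real code additionally requires spte_can_locklessly_be_made_writable().

/*
 * Standalone sketch of the new fast-path decision (not from the patch).
 * PFERR_WRITE_MASK mirrors KVM's x86 page-fault error-code bit for writes;
 * PT_SPP_MASK is the bit added in the mmu header hunk below.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFERR_WRITE_MASK (1ULL << 1)   /* fault was caused by a write  */
#define PT_SPP_MASK      (1ULL << 61)  /* SPTE is sub-page protected   */

/* True when a write fault should be forwarded to userspace as
 * KVM_EXIT_SPP instead of being fixed up locklessly. */
static bool spp_write_fault(uint64_t error_code, uint64_t spte)
{
	return (error_code & PFERR_WRITE_MASK) && (spte & PT_SPP_MASK);
}

int main(void)
{
	uint64_t spte = PT_SPP_MASK | 0x7;	/* a present, SPP-protected entry */

	printf("write fault -> user exit: %d\n",
	       spp_write_fault(PFERR_WRITE_MASK, spte));
	printf("read fault  -> user exit: %d\n",
	       spp_write_fault(0, spte));
	return 0;
}
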
@@ -26,6 +26,7 @@
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
+#define PT_SPP_MASK (1ULL << 61)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
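
PT_SPP_MASK claims bit 61 of the 64-bit (E)PT entry, between the low attribute bits and the NX bit already defined here. A throwaway compile-time check (my sketch, not part of the patch, using only the values defined above) makes the non-overlap explicit:

/* Sketch only: compile-time sanity checks for the new SPP bit.
 * The mask values are copied from the definitions above. */
#include <assert.h>

#define PT_PAT_MASK    (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT_SPP_MASK    (1ULL << 61)
#define PT64_NX_MASK   (1ULL << 63)

static_assert((PT_SPP_MASK & (PT_SPP_MASK - 1)) == 0,
	      "PT_SPP_MASK must be a single bit");
static_assert((PT_SPP_MASK & (PT_PAT_MASK | PT_GLOBAL_MASK | PT64_NX_MASK)) == 0,
	      "PT_SPP_MASK must not alias existing PT bits");

int main(void) { return 0; }
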
@@ -235,6 +235,7 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_S390_STSI 25
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
+#define KVM_EXIT_SPP 28
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -390,6 +391,10 @@ struct kvm_run {
struct {
__u8 vector;
} eoi;
+ /* KVM_EXIT_SPP */
+ struct {
+ __u64 addr;
+ } spp;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
/* Fix the size of the union. */
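
On the userspace side of the ABI, the new exit is consumed like any other. The following is a minimal sketch of a VCPU run loop handling it; it assumes the patched uapi <linux/kvm.h> (so that KVM_EXIT_SPP and kvm_run.spp exist) and that vcpu_fd and run were set up with the usual KVM_CREATE_VCPU plus mmap of KVM_GET_VCPU_MMAP_SIZE. The function name is hypothetical, not QEMU's.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Run the vcpu once and dispatch on the exit reason.  'run' is the
 * mmap()ed struct kvm_run shared with the kernel for this vcpu. */
static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
		perror("KVM_RUN");
		return -1;
	}

	switch (run->exit_reason) {
	case KVM_EXIT_SPP:
		/*
		 * A write hit a sub-page-protected region; run->spp.addr is
		 * the guest address recorded by fast_page_fault().  What to
		 * do about it (log it, lift the protection, stop the guest,
		 * ...) is policy and stays in userspace.
		 */
		fprintf(stderr, "SPP write fault at 0x%llx\n",
			(unsigned long long)run->spp.addr);
		return 0;
	case KVM_EXIT_IO:
	case KVM_EXIT_MMIO:
		/* normal device-emulation paths, elided here */
		return 0;
	default:
		fprintf(stderr, "unhandled exit reason %u\n", run->exit_reason);
		return -1;
	}
}

Note that the kernel side calls kvm_skip_emulated_instruction() before exiting, so re-entering with KVM_RUN resumes the guest after the faulting instruction; the protected write itself appears to be dropped unless userspace chooses to emulate it.
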