@@ -221,6 +221,12 @@ static inline unsigned int kvm_get_vmid_bits(void)
return 8;
}

+static inline void kvm_nested_s2_unmap(struct kvm_vcpu *vcpu) { }
+static inline void kvm_nested_s2_free(struct kvm *kvm) { }
+static inline void kvm_nested_s2_wp(struct kvm *kvm) { }
+static inline void kvm_nested_s2_clear(struct kvm *kvm) { }
+static inline void kvm_nested_s2_flush(struct kvm *kvm) { }
+
static inline u64 kvm_get_vttbr(struct kvm_s2_vmid *vmid,
struct kvm_s2_mmu *mmu)
{
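A note on the stubs above: when nested virtualization support is compiled out, these empty inlines let common MMU code call the nested helpers unconditionally, with no #ifdef at the call sites. A minimal sketch of such a call site (hypothetical function name, mirroring the kvm_mmu_wp_memory_region() hunk further down; kvm_stage2_wp_range() and KVM_PHYS_SIZE are taken from the patch itself):

	static void example_wp_all_stage2(struct kvm *kvm)
	{
		spin_lock(&kvm->mmu_lock);
		/* Write-protect the VM's own stage 2 tables... */
		kvm_stage2_wp_range(kvm, &kvm->arch.mmu, 0, KVM_PHYS_SIZE);
		/* ...and every shadow stage 2 table; a no-op here. */
		kvm_nested_s2_wp(kvm);
		spin_unlock(&kvm->mmu_lock);
	}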
@@ -325,6 +325,11 @@ static inline unsigned int kvm_get_vmid_bits(void)
struct kvm_nested_s2_mmu *get_nested_mmu(struct kvm_vcpu *vcpu, u64 vttbr);
struct kvm_s2_mmu *vcpu_get_active_s2_mmu(struct kvm_vcpu *vcpu);
void update_nested_s2_mmu(struct kvm_vcpu *vcpu);
+void kvm_nested_s2_unmap(struct kvm_vcpu *vcpu);
+void kvm_nested_s2_free(struct kvm *kvm);
+void kvm_nested_s2_wp(struct kvm *kvm);
+void kvm_nested_s2_clear(struct kvm *kvm);
+void kvm_nested_s2_flush(struct kvm *kvm);

static inline u64 kvm_get_vttbr(struct kvm_s2_vmid *vmid,
struct kvm_s2_mmu *mmu)
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2017 - Columbia University and Linaro Ltd.
* Author: Jintack Lim <jintack.lim@linaro.org>
+ * Author: Christoffer Dall <cdall@cs.columbia.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,6 +22,45 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>

+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_wp(struct kvm *kvm)
+{
+ struct kvm_nested_s2_mmu *nested_mmu;
+ struct list_head *nested_mmu_list = &kvm->arch.nested_mmu_list;
+
+ list_for_each_entry_rcu(nested_mmu, nested_mmu_list, list)
+ kvm_stage2_wp_range(kvm, &nested_mmu->mmu, 0, KVM_PHYS_SIZE);
+}
+
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_clear(struct kvm *kvm)
+{
+ struct kvm_nested_s2_mmu *nested_mmu;
+ struct list_head *nested_mmu_list = &kvm->arch.nested_mmu_list;
+
+ list_for_each_entry_rcu(nested_mmu, nested_mmu_list, list)
+ kvm_unmap_stage2_range(kvm, &nested_mmu->mmu, 0, KVM_PHYS_SIZE);
+}
+
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_flush(struct kvm *kvm)
+{
+ struct kvm_nested_s2_mmu *nested_mmu;
+ struct list_head *nested_mmu_list = &kvm->arch.nested_mmu_list;
+
+ list_for_each_entry_rcu(nested_mmu, nested_mmu_list, list)
+ kvm_stage2_flush_range(&nested_mmu->mmu, 0, KVM_PHYS_SIZE);
+}
+
+void kvm_nested_s2_free(struct kvm *kvm)
+{
+ struct kvm_nested_s2_mmu *nested_mmu;
+ struct list_head *nested_mmu_list = &kvm->arch.nested_mmu_list;
+
+ list_for_each_entry_rcu(nested_mmu, nested_mmu_list, list)
+ __kvm_free_stage2_pgd(kvm, &nested_mmu->mmu);
+}
+
static struct kvm_nested_s2_mmu *lookup_nested_mmu(struct kvm_vcpu *vcpu,
u64 vttbr)
{
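The four walkers above touch exactly two fields of struct kvm_nested_s2_mmu: the list linkage and the embedded stage 2 context. The structure itself is defined elsewhere in the series; inferred purely from these uses, its layout is at least the following sketch (the real definition certainly carries more state, e.g. the guest hypervisor's VTTBR, given that get_nested_mmu() looks MMUs up by vttbr):

	struct kvm_nested_s2_mmu {
		struct kvm_s2_mmu mmu;	/* shadow stage 2 context handed to the
					 * kvm_stage2_*()/kvm_unmap_stage2_range()
					 * helpers */
		struct list_head list;	/* entry on kvm->arch.nested_mmu_list */
	};

The list is walked with list_for_each_entry_rcu() while writers serialize on kvm->mmu_lock, which the comments on three of the four walkers spell out; kvm_nested_s2_free() carries no such comment because it runs from kvm_arch_destroy_vm() (see the next hunk), where no vCPU can still be running.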
@@ -187,6 +187,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
free_percpu(kvm->arch.last_vcpu_ran);
kvm->arch.last_vcpu_ran = NULL;

+ kvm_nested_s2_free(kvm);
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -926,8 +928,10 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Ensure a rebooted VM will fault in RAM pages and detect if the
* guest MMU is turned off and flush the caches as needed.
*/
- if (vcpu->arch.has_run_once)
+ if (vcpu->arch.has_run_once) {
stage2_unmap_vm(vcpu->kvm);
+ kvm_nested_s2_clear(vcpu->kvm);
+ }

vcpu_reset_hcr(vcpu);

@@ -434,6 +434,8 @@ static void stage2_flush_vm(struct kvm *kvm)
kvm_for_each_memslot(memslot, slots)
stage2_flush_memslot(&kvm->arch.mmu, memslot);

+ kvm_nested_s2_flush(kvm);
+
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1268,6 +1270,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)

spin_lock(&kvm->mmu_lock);
kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+ kvm_nested_s2_wp(kvm);
spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
}
@@ -1306,6 +1309,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
gfn_t gfn_offset, unsigned long mask)
{
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+ kvm_nested_s2_wp(kvm);
}

static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
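For context: kvm_arch_mmu_enable_log_dirty_pt_masked() is driven by the generic dirty-log code, which hands the architecture one word of the dirty bitmap at a time, so the hunk above write-protects every nested stage 2 table on each call; correct, but coarse until the rmap-based scheme mentioned in the TODOs below exists. A rough sketch of the caller's shape (paraphrased; hypothetical helper name, and details of the generic code may differ):

	static void example_protect_dirty_word(struct kvm *kvm,
					       struct kvm_memory_slot *memslot,
					       unsigned long *word, long word_idx)
	{
		/* Bit i of 'mask' marks page (word_idx * BITS_PER_LONG + i),
		 * relative to the memslot base, as dirty. */
		unsigned long mask = xchg(word, 0);

		if (mask)
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
						word_idx * BITS_PER_LONG, mask);
	}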
@@ -1643,6 +1647,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
kvm_unmap_stage2_range(kvm, &kvm->arch.mmu, gpa, size);
+ kvm_nested_s2_clear(kvm);
return 0;
}

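The handler above is reached through the MMU notifiers. Paraphrasing the existing glue code (not part of this patch): every hva invalidation now also drops all nested stage 2 mappings, again pending the reverse mapping promised in the TODOs below:

	int kvm_unmap_hva_range(struct kvm *kvm,
				unsigned long start, unsigned long end)
	{
		/* Apply the handler to each guest-physical range backed by
		 * a memslot overlapping [start, end). */
		handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
		return 0;
	}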
@@ -1682,6 +1687,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
* through this calling path.
*/
stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0);
+ kvm_nested_s2_clear(kvm);
return 0;
}

@@ -1716,6 +1722,11 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
if (pte_none(*pte))
return 0;

+ /*
+ * TODO: Handle nested_mmu structures here using the reverse mapping in
+ * a later version of the patch series.
+ */
+
return stage2_ptep_test_and_clear_young(pte);
}

@@ -1736,6 +1747,11 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
if (!pte_none(*pte)) /* Just a page... */
return pte_young(*pte);

+ /*
+ * TODO: Handle nested_mmu structures here using the reverse mapping in
+ * a later version of the patch series.
+ */
+
return 0;
}

@@ -1992,6 +2008,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,

spin_lock(&kvm->mmu_lock);
kvm_unmap_stage2_range(kvm, &kvm->arch.mmu, gpa, size);
+ kvm_nested_s2_clear(kvm);
spin_unlock(&kvm->mmu_lock);
}