diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -207,37 +207,18 @@ static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)

void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
- if (is_tdp_mmu_active(vcpu)) {
+ if (is_tdp_mmu_active(vcpu))
kvm_tdp_mmu_walk_lockless_begin();
- } else {
- /*
- * Prevent page table teardown by making any free-er wait during
- * kvm_flush_remote_tlbs() IPI to all active vcpus.
- */
- local_irq_disable();
-
- /*
- * Make sure a following spte read is not reordered ahead of the write
- * to vcpu->mode.
- */
- smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
- }
+ else
+ kvm_shadow_mmu_walk_lockless_begin(vcpu);
}

void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
- if (is_tdp_mmu_active(vcpu)) {
+ if (is_tdp_mmu_active(vcpu))
kvm_tdp_mmu_walk_lockless_end();
- } else {
- /*
- * Make sure the write to vcpu->mode is not reordered in front
- * of reads to sptes. If it does,
- * kvm_shadow_mmu_commit_zap_page() can see us
- * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
- */
- smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
- local_irq_enable();
- }
+ else
+ kvm_shadow_mmu_walk_lockless_end(vcpu);
}

int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -3663,3 +3663,30 @@ void kvm_mmu_uninit_shadow_mmu(struct kvm *kvm)
kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
}
+
+void kvm_shadow_mmu_walk_lockless_begin(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Prevent page table teardown by making any free-er wait during
+ * kvm_flush_remote_tlbs() IPI to all active vcpus.
+ */
+ local_irq_disable();
+
+ /*
+ * Make sure a following spte read is not reordered ahead of the write
+ * to vcpu->mode.
+ */
+ smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+}
+
+void kvm_shadow_mmu_walk_lockless_end(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Make sure the write to vcpu->mode is not reordered in front
+ * of reads to sptes. If it does,
+ * kvm_shadow_mmu_commit_zap_page() can see us
+ * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
+ */
+ smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+ local_irq_enable();
+}
diff --git a/arch/x86/kvm/mmu/shadow_mmu.h b/arch/x86/kvm/mmu/shadow_mmu.h
--- a/arch/x86/kvm/mmu/shadow_mmu.h
+++ b/arch/x86/kvm/mmu/shadow_mmu.h
@@ -103,6 +103,9 @@ void kvm_shadow_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_init_shadow_mmu(struct kvm *kvm);
void kvm_mmu_uninit_shadow_mmu(struct kvm *kvm);

+void kvm_shadow_mmu_walk_lockless_begin(struct kvm_vcpu *vcpu);
+void kvm_shadow_mmu_walk_lockless_end(struct kvm_vcpu *vcpu);
+
/* Exports from paging_tmpl.h */
gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t vaddr, u64 access,
Split out the meat of kvm_shadow_mmu_walk_lockless_begin/end() to
functions in shadow_mmu.c since there's no need for it in the common
MMU code.

Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c        | 31 ++++++-------------------------
 arch/x86/kvm/mmu/shadow_mmu.c | 27 +++++++++++++++++++++++++++
 arch/x86/kvm/mmu/shadow_mmu.h |  3 +++
 3 files changed, 36 insertions(+), 25 deletions(-)
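
Not part of the patch, just for context: callers such as fast_page_fault()
bracket their lockless SPTE reads with these helpers. Below is a minimal
sketch of that caller pattern; example_read_last_spte() is a made-up name,
and where for_each_shadow_entry_lockless() ends up living after this series
may differ.

/*
 * Illustrative only, not from this patch: the usage pattern the begin/end
 * helpers protect. While the walk is bracketed, a concurrent zap cannot
 * free the page tables out from under the reader (IRQs are off for the
 * shadow MMU; the TDP MMU side relies on RCU).
 */
static u64 example_read_last_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte = 0ull;

	walk_shadow_page_lockless_begin(vcpu);

	/* Lockless walk; keep only the last SPTE encountered. */
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
		;

	walk_shadow_page_lockless_end(vcpu);

	return spte;
}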