[v3,4/9] KVM: Add module param to make page tables NUMA aware

Message ID: 20221222023457.1764-5-vipinsh@google.com
State: New, archived
Series: NUMA aware page table's pages allocation

Commit Message

Vipin Sharma Dec. 22, 2022, 2:34 a.m. UTC
Add a numa_aware_pagetable module param to make page tables NUMA aware.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 22 ++++++++++++++++++++++
 2 files changed, 24 insertions(+)
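
A note on usage: because the param is registered with mode 0644, root can
toggle it at runtime through sysfs. Assuming kvm_main.c is built into
kvm.ko as usual, the knob appears as
/sys/module/kvm/parameters/numa_aware_pagetable. A minimal userspace
sketch (illustrative only, not part of the patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		/* Path assumes the param lands in kvm.ko, as in this patch. */
		const char *path = "/sys/module/kvm/parameters/numa_aware_pagetable";
		const char *val = (argc > 1 && argv[1][0] == '0') ? "N" : "Y";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, val, 1) != 1) {
			perror("write");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}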

Comments

David Matlack Dec. 29, 2022, 10:05 p.m. UTC | #1
On Wed, Dec 21, 2022 at 06:34:52PM -0800, Vipin Sharma wrote:
> Add a numa_aware_pagetable module param to make page tables NUMA aware.

Generally it's not good practice to introduce dead code, so I would
request merging this with the next patch.

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index efd9b38ea9a2..d48064503b88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1358,6 +1358,8 @@  void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 
+void *kvm_mmu_get_free_page(int nid, gfp_t gfp);
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f2d762878b97..d96c8146e9ba 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -93,6 +93,13 @@  unsigned int halt_poll_ns_shrink;
 module_param(halt_poll_ns_shrink, uint, 0644);
 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
 
+/*
+ * If possible, allocate page table pages on the same NUMA node as the
+ * underlying physical pages they point to.
+ */
+static bool __read_mostly numa_aware_pagetable = true;
+module_param_named(numa_aware_pagetable, numa_aware_pagetable, bool, 0644);
+
 /*
  * Ordering of locks:
  *
@@ -384,6 +391,21 @@  static void kvm_flush_shadow_all(struct kvm *kvm)
 	kvm_arch_guest_memory_reclaimed(kvm);
 }
 
+void *kvm_mmu_get_free_page(int nid, gfp_t gfp)
+{
+#ifdef CONFIG_NUMA
+	struct page *spt_page;
+
+	if (numa_aware_pagetable) {
+		spt_page = alloc_pages_node(nid, gfp, 0);
+		if (spt_page)
+			return page_address(spt_page);
+	}
+#endif /* CONFIG_NUMA */
+
+	return (void *)__get_free_page(gfp);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
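
For context on how the helper is meant to be used: a caller passes the
NUMA node of the physical page that the new page table page will map,
with NUMA_NO_NODE as a safe fallback (alloc_pages_node() treats it as
"no preference"). A hypothetical arch-side sketch, not taken from this
series, using the stock pfn_valid()/pfn_to_page()/page_to_nid() helpers:

	/* Hypothetical caller; alloc_spt_for_pfn() is not from this series. */
	static void *alloc_spt_for_pfn(kvm_pfn_t pfn, gfp_t gfp)
	{
		int nid = NUMA_NO_NODE;

		/* Derive the node from the backing struct page, if any. */
		if (pfn_valid(pfn))
			nid = page_to_nid(pfn_to_page(pfn));

		return kvm_mmu_get_free_page(nid, gfp);
	}

Since kvm_mmu_get_free_page() falls back to __get_free_page() when the
node-aware allocation is disabled or fails, the !CONFIG_NUMA and
allocation-failure paths behave exactly as before.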