Add a numa_aware_pagetable module param to make page tables NUMA aware.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 22 ++++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1358,6 +1358,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void *kvm_mmu_get_free_page(int nid, gfp_t gfp);
+
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -93,6 +93,13 @@ unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
+/*
+ * If possible, allocate page table pages on the same NUMA node as the
+ * underlying physical pages they point to.
+ */
+static bool __read_mostly numa_aware_pagetable = true;
+module_param(numa_aware_pagetable, bool, 0644);
+
/*
* Ordering of locks:
*
@@ -384,6 +391,21 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
kvm_arch_guest_memory_reclaimed(kvm);
}
+void *kvm_mmu_get_free_page(int nid, gfp_t gfp)
+{
+#ifdef CONFIG_NUMA
+	struct page *spt_page;
+
+	if (numa_aware_pagetable) {
+		spt_page = alloc_pages_node(nid, gfp, 0);
+		if (spt_page)
+			return page_address(spt_page);
+	}
+#endif /* CONFIG_NUMA */
+
+	return (void *)__get_free_page(gfp);
+}
+
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
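A note on runtime control, inferred from the 0644 permissions rather than stated explicitly in the patch: since kvm_main.c is built into kvm.ko, the knob should show up as /sys/module/kvm/parameters/numa_aware_pagetable and be writable at runtime, so the NUMA-aware behavior can be toggled without reloading KVM.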
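For readers wondering where the nid argument would come from, below is a minimal caller sketch, not part of this patch: the helper name alloc_spt_near_pfn() is made up here, and deriving the node from the pfn the new page table will point to is an assumption based on the comment above numa_aware_pagetable.

/*
 * Illustrative sketch only: allocate a page table page near the memory
 * it will map. Falls back to kvm_mmu_get_free_page()'s
 * __get_free_page() path when CONFIG_NUMA is off or the knob is
 * disabled.
 */
static u64 *alloc_spt_near_pfn(kvm_pfn_t pfn, gfp_t gfp)
{
	int nid = NUMA_NO_NODE;

	/* Only pfns backed by struct page carry a usable node id. */
	if (pfn_valid(pfn))
		nid = page_to_nid(pfn_to_page(pfn));

	return kvm_mmu_get_free_page(nid, gfp);
}

alloc_pages_node() treats NUMA_NO_NODE as "allocate on the current node", so the sketch degrades to today's behavior for pfns that are not backed by a struct page.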