
[v3,3/3] KVM: SVM: Consider NUMA affinity when allocating per-CPU save_area

Message ID 20240520120858.13117-4-lirongqing@baidu.com (mailing list archive)
State New, archived
Series KVM: SVM: refine snp_safe_alloc_page() implementation

Commit Message

Li RongQing May 20, 2024, 12:08 p.m. UTC
The save_area of each per-CPU svm_data is accessed predominantly from
its own CPU, so allocate it node-local for better performance.

To that end, rename __snp_safe_alloc_page() to snp_safe_alloc_page_node(),
which takes a NUMA node id as a parameter, and have svm_cpu_init() call
it with the node id derived from the CPU id.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 arch/x86/kvm/svm/sev.c |  6 +++---
 arch/x86/kvm/svm/svm.c |  2 +-
 arch/x86/kvm/svm/svm.h | 10 +++++-----
 3 files changed, 9 insertions(+), 9 deletions(-)
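
For context, a minimal sketch (editorial illustration, not part of the
patch) of the node-local allocation pattern the commit message relies on,
assuming the standard kernel helpers cpu_to_node() and alloc_pages_node();
the function name alloc_node_local_page() is hypothetical:

#include <linux/gfp.h>
#include <linux/topology.h>

/* Allocate one zeroed page on the NUMA node that owns @cpu. */
static struct page *alloc_node_local_page(int cpu)
{
	/* Map the CPU id to its home NUMA node... */
	int node = cpu_to_node(cpu);

	/* ...and allocate an order-0, zeroed page from that node. */
	return alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
}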

Comments

Tom Lendacky May 20, 2024, 6:26 p.m. UTC | #1
On 5/20/24 07:08, Li RongQing wrote:
> The save_area of each per-CPU svm_data is accessed predominantly from
> its own CPU, so allocate it node-local for better performance.
>
> To that end, rename __snp_safe_alloc_page() to snp_safe_alloc_page_node(),
> which takes a NUMA node id as a parameter, and have svm_cpu_init() call
> it with the node id derived from the CPU id.
> 
> Signed-off-by: Li RongQing <lirongqing@baidu.com>

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

> ---
>   arch/x86/kvm/svm/sev.c |  6 +++---
>   arch/x86/kvm/svm/svm.c |  2 +-
>   arch/x86/kvm/svm/svm.h | 10 +++++-----
>   3 files changed, 9 insertions(+), 9 deletions(-)
>

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 4d53478..1c55159 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3380,13 +3380,13 @@  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	}
 }
 
-struct page *__snp_safe_alloc_page(gfp_t gfp)
+struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 {
 	unsigned long pfn;
 	struct page *p;
 
 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
-		return alloc_page(gfp | __GFP_ZERO);
+		return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
 
 	/*
 	 * Allocate an SNP-safe page to workaround the SNP erratum where
@@ -3397,7 +3397,7 @@  struct page *__snp_safe_alloc_page(gfp_t gfp)
 	 * Allocate one extra page, choose a page which is not
 	 * 2MB-aligned, and free the other.
 	 */
-	p = alloc_pages(gfp | __GFP_ZERO, 1);
+	p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
 	if (!p)
 		return NULL;
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index adbd676..da5cdde 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -703,7 +703,7 @@  static int svm_cpu_init(int cpu)
 	int ret = -ENOMEM;
 
 	memset(sd, 0, sizeof(struct svm_cpu_data));
-	sd->save_area = __snp_safe_alloc_page(GFP_KERNEL);
+	sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
 	if (!sd->save_area)
 		return ret;
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e0a1258..8983eab 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -694,11 +694,11 @@  void sev_guest_memory_reclaimed(struct kvm *kvm);
 int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
 
 /* These symbols are used in common code and are stubbed below.  */
-struct page *__snp_safe_alloc_page(gfp_t gfp);
 
+struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
 static inline struct page *snp_safe_alloc_page(void)
 {
-	return __snp_safe_alloc_page(GFP_KERNEL_ACCOUNT);
+	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
 }
 
 void sev_free_vcpu(struct kvm_vcpu *vcpu);
@@ -710,14 +710,14 @@  int sev_cpu_init(struct svm_cpu_data *sd);
 int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
 extern unsigned int max_sev_asid;
 #else
-static inline struct page *__snp_safe_alloc_page(gfp_t gfp)
+static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 {
-	return alloc_page(gfp | __GFP_ZERO);
+	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
 }
 
 static inline struct page *snp_safe_alloc_page(void)
 {
-	return __snp_safe_alloc_page(GFP_KERNEL_ACCOUNT);
+	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
 }
 
 static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
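
For reference, a minimal sketch of the "allocate one extra page, choose a
page which is not 2MB-aligned, and free the other" workaround described by
the comment in the sev.c hunk. The tail of snp_safe_alloc_page_node() is
not shown in the hunk above, so the body below is an editorial
illustration built from standard kernel primitives (split_page(),
page_to_pfn(), IS_ALIGNED(), PTRS_PER_PMD), and the function name
pick_non_2mb_aligned_page() is hypothetical:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

static struct page *pick_non_2mb_aligned_page(int node, gfp_t gfp)
{
	unsigned long pfn;
	struct page *p;

	/* Order-1 allocation: two physically contiguous pages from @node. */
	p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	/* Split the order-1 allocation into two independently freeable pages. */
	split_page(p, 1);

	/*
	 * PTRS_PER_PMD 4K pages span 2MB, so a PFN aligned to PTRS_PER_PMD
	 * sits on a 2MB boundary: keep the page that is NOT 2MB-aligned and
	 * free the other one.
	 */
	pfn = page_to_pfn(p);
	if (IS_ALIGNED(pfn, PTRS_PER_PMD))
		__free_page(p++);
	else
		__free_page(p + 1);

	return p;
}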