@@ -3179,13 +3179,13 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}
-struct page *__snp_safe_alloc_page(gfp_t gfp)
+struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
unsigned long pfn;
struct page *p;
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
- return alloc_page(gfp | __GFP_ZERO);
+ return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
/*
* Allocate an SNP-safe page to workaround the SNP erratum where
@@ -3196,7 +3196,7 @@ struct page *__snp_safe_alloc_page(gfp_t gfp)
* Allocate one extra page, choose a page which is not
* 2MB-aligned, and free the other.
*/
- p = alloc_pages(gfp | __GFP_ZERO, 1);
+ p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
if (!p)
return NULL;
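
For reference, the helper in arch/x86/kvm/svm/sev.c would read roughly as below once both hunks are applied. The tail after the order-1 allocation is not part of this diff and is sketched here from the pre-existing __snp_safe_alloc_page() logic, so treat it as illustrative rather than authoritative:

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	unsigned long pfn;
	struct page *p;

	/* Non-SNP hosts need no erratum workaround: plain node-local page. */
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return alloc_pages_node(node, gfp | __GFP_ZERO, 0);

	/*
	 * SNP erratum workaround: allocate one extra page on the requested
	 * node, keep the page that is not 2MB-aligned, free the other.
	 */
	p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	/* Pre-existing tail (unchanged by this patch, shown for context): */
	split_page(p, 1);

	pfn = page_to_pfn(p);
	if (IS_ALIGNED(pfn, PTRS_PER_PMD))
		__free_page(p++);	/* first page is 2MB-aligned, keep the second */
	else
		__free_page(p + 1);	/* first page is safe, free the second */

	return p;
}
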
@@ -703,7 +703,7 @@ static int svm_cpu_init(int cpu)
int ret = -ENOMEM;
memset(sd, 0, sizeof(struct svm_cpu_data));
- sd->save_area = __snp_safe_alloc_page(GFP_KERNEL);
+ sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
if (!sd->save_area)
return ret;
@@ -695,11 +695,11 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
-struct page *__snp_safe_alloc_page(gfp_t gfp);
+struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
static inline struct page *snp_safe_alloc_page(void)
{
- return __snp_safe_alloc_page(GFP_KERNEL_ACCOUNT);
+ return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}
/* vmenter.S */
The save_area of per-CPU svm_data is accessed predominantly from its own local CPU, so allocate it node-local for better performance. To that end, rename __snp_safe_alloc_page() to snp_safe_alloc_page_node(), which takes a NUMA node id as an input parameter, and have svm_cpu_init() call it with the node id derived from the CPU id.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 arch/x86/kvm/svm/sev.c | 6 +++---
 arch/x86/kvm/svm/svm.c | 2 +-
 arch/x86/kvm/svm/svm.h | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)
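
Put side by side (an illustrative sketch drawn from the hunks above, with surrounding code omitted), the two allocation paths after this change are:

	/* Per-CPU init path: pin the save area to the target CPU's home node. */
	sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);

	/* Generic path: default to the allocating task's current node. */
	static inline struct page *snp_safe_alloc_page(void)
	{
		return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
	}

Using cpu_to_node(cpu) rather than numa_node_id() in svm_cpu_init() matters because per-CPU setup for CPU N does not necessarily run on CPU N, so the caller's current node can differ from the node the save_area will mostly be accessed from.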