
[v2] x86/sev: Make enc_dec_hypercall() accept a size instead of npages

Message ID 20230821225859.883120-1-srutherford@google.com (mailing list archive)
State New, archived
Series [v2] x86/sev: Make enc_dec_hypercall() accept a size instead of npages

Commit Message

Steve Rutherford Aug. 21, 2023, 10:58 p.m. UTC
enc_dec_hypercall() accepted a page count instead of a size, which
forced its callers to round up. As a result, non-page-aligned
vaddrs caused pages to be spuriously marked as decrypted via the
encryption status hypercall, which in turn caused consistent
corruption of pages during live migration. Live migration requires
accurate encryption status information to avoid migrating pages
from the wrong perspective.

Fixes: 064ce6c550a0 ("mm: x86: Invoke hypercall when page encryption status is changed")
Signed-off-by: Steve Rutherford <srutherford@google.com>
---
 arch/x86/include/asm/mem_encrypt.h |  6 +++---
 arch/x86/kernel/kvm.c              |  4 +---
 arch/x86/mm/mem_encrypt_amd.c      | 13 ++++++-------
 3 files changed, 10 insertions(+), 13 deletions(-)
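
To make the arithmetic concrete, here is a minimal sketch of the pre-patch
failure mode, assuming 4 KiB pages and an unaligned vaddr reaching the
hypercall (the addresses are illustrative, not taken from the report):

	/* Range [0x1800, 0x2000): only the page at 0x1000 is touched. */
	unsigned long vaddr = 0x1800;			/* not page-aligned */
	unsigned long size  = 0x800;
	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;	/* rounds up to 1 */

	/* Pre-patch, enc_dec_hypercall() rebuilt the end address from npages: */
	unsigned long vaddr_end = vaddr + (npages << PAGE_SHIFT);	/* 0x2800 */

	/*
	 * The page walk therefore visits 0x1000 and 0x2000, spuriously
	 * marking the untouched page at 0x2000 as decrypted. Post-patch,
	 * vaddr_end = vaddr + size = 0x2000, and only 0x1000 is visited.
	 */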

Comments

Tom Lendacky Aug. 22, 2023, 9:28 p.m. UTC | #1
On 8/21/23 17:58, Steve Rutherford wrote:
> [...]

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

Gupta, Pankaj Aug. 24, 2023, 9:04 a.m. UTC | #2
On 8/22/2023 12:58 AM, Steve Rutherford wrote:
> [...]

I also had this thought: avoid the page-boundary calculations that came
with npages in place of the existing size-based interface. No strong
opinions, though.

This seems even better. Thanks!

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Steve Rutherford Aug. 24, 2023, 10:37 p.m. UTC | #3
Reuploaded as v3 with `Cc: stable@vger.kernel.org` added.


On Thu, Aug 24, 2023 at 2:04 AM Gupta, Pankaj <pankaj.gupta@amd.com> wrote:
>
> On 8/22/2023 12:58 AM, Steve Rutherford wrote:
> > [...]
>
> I also had this thought: avoid the page-boundary calculations that came
> with npages in place of the existing size-based interface. No strong
> opinions, though.
>
> This seems even better. Thanks!
>
> Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>

Patch

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 7f97a8a97e24..473b16d73b47 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,8 +50,8 @@ void __init sme_enable(struct boot_params *bp);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
 int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
-					    bool enc);
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
+					    unsigned long size, bool enc);
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
@@ -85,7 +85,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
 static inline int __init
 early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
 static inline void __init
-early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
+early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6a36db4f79fd..b8ab9ee5896c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -966,10 +966,8 @@ static void __init kvm_init_platform(void)
 		 * Ensure that _bss_decrypted section is marked as decrypted in the
 		 * shared pages list.
 		 */
-		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
-					PAGE_SIZE);
 		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
-						nr_pages, 0);
+						__end_bss_decrypted - __start_bss_decrypted, 0);
 
 		/*
 		 * If not booted using EFI, enable Live migration support.
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 54bbd5163e8d..6faea41e99b6 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -288,11 +288,10 @@ static bool amd_enc_cache_flush_required(void)
 	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
 }
 
-static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 {
 #ifdef CONFIG_PARAVIRT
-	unsigned long sz = npages << PAGE_SHIFT;
-	unsigned long vaddr_end = vaddr + sz;
+	unsigned long vaddr_end = vaddr + size;
 
 	while (vaddr < vaddr_end) {
 		int psize, pmask, level;
@@ -342,7 +341,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
 		snp_set_memory_private(vaddr, npages);
 
 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
-		enc_dec_hypercall(vaddr, npages, enc);
+		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
 
 	return true;
 }
@@ -466,7 +465,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
-	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+	early_set_mem_enc_dec_hypercall(start, size, enc);
 out:
 	__flush_tlb_all();
 	return ret;
@@ -482,9 +481,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 {
-	enc_dec_hypercall(vaddr, npages, enc);
+	enc_dec_hypercall(vaddr, size, enc);
 }
 
 void __init sme_early_init(void)
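
For reference, a simplified model of the size-based walk after this patch.
The real enc_dec_hypercall() derives psize/pmask from the mapping level on
each iteration; this sketch assumes 4 KiB mappings throughout and a
hypothetical notify_page() helper standing in for the actual hypercall:

	static void enc_dec_walk(unsigned long vaddr, unsigned long size, bool enc)
	{
		unsigned long vaddr_end = vaddr + size;

		while (vaddr < vaddr_end) {
			/* Report the encryption status of the page backing vaddr. */
			notify_page(vaddr & PAGE_MASK, enc);

			/*
			 * Step to the next page boundary; the real loop steps
			 * by the mapped page size: (vaddr & pmask) + psize.
			 */
			vaddr = (vaddr & PAGE_MASK) + PAGE_SIZE;
		}
	}

Because vaddr_end is derived from the exact byte count, an unaligned range
never spills past its final page, which is what removes the spurious
"decrypted" markings described above.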