[RFC,v7,23/64] x86/fault: Add support to dump RMP entry on fault

Message ID 20221214194056.161492-24-michael.roth@amd.com (mailing list archive)
State New
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Dec. 14, 2022, 7:40 p.m. UTC
From: Brijesh Singh <brijesh.singh@amd.com>

When SEV-SNP is enabled globally, writes from the host are subject to an
RMP check. If the hardware detects a check failure, it raises a #PF with
the RMP bit set in the error code. Dump the RMP entry at the faulting pfn
to aid debugging.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/include/asm/sev.h |  2 ++
 arch/x86/kernel/sev.c      | 43 ++++++++++++++++++++++++++++++++++++++
 arch/x86/mm/fault.c        |  7 ++++++-
 3 files changed, 51 insertions(+), 1 deletion(-)
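
For illustration, the pr_info() calls added by this patch each produce a one-line dump. A hypothetical example of the two output forms, with made-up field values (pfn 0x12345, so paddr = pfn << PAGE_SHIFT = 0x12345000):

  RMPEntry paddr 0x12345000 [assigned=1 immutable=1 pagesize=0 gpa=0x8000 asid=2 vmsa=0 validated=1]
  RMPEntry paddr 0x12345000: [high=0x0000000000000000 low=0x8000000000000000]

The first form is printed when the faulting pfn's RMP entry is assigned; the second is used when sweeping the surrounding 2MB region for non-zero entries.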

Comments

Alper Gun Jan. 13, 2023, 10:56 p.m. UTC | #1
On Wed, Dec 14, 2022 at 11:52 AM Michael Roth <michael.roth@amd.com> wrote:
>
> From: Brijesh Singh <brijesh.singh@amd.com>
>
> When SEV-SNP is enabled globally, writes from the host are subject to an
> RMP check. If the hardware detects a check failure, it raises a #PF with
> the RMP bit set in the error code. Dump the RMP entry at the faulting
> pfn to aid debugging.
>
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> ---
>  arch/x86/include/asm/sev.h |  2 ++
>  arch/x86/kernel/sev.c      | 43 ++++++++++++++++++++++++++++++++++++++
>  arch/x86/mm/fault.c        |  7 ++++++-
>  3 files changed, 51 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
> index 4eeedcaca593..2916f4150ac7 100644
> --- a/arch/x86/include/asm/sev.h
> +++ b/arch/x86/include/asm/sev.h
> @@ -215,6 +215,7 @@ int snp_lookup_rmpentry(u64 pfn, int *level);
>  int psmash(u64 pfn);
>  int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable);
>  int rmp_make_shared(u64 pfn, enum pg_level level);
> +void sev_dump_rmpentry(u64 pfn);
>  #else
>  static inline void sev_es_ist_enter(struct pt_regs *regs) { }
>  static inline void sev_es_ist_exit(void) { }
> @@ -247,6 +248,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int as
>         return -ENODEV;
>  }
>  static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
> +static inline void sev_dump_rmpentry(u64 pfn) {}
>  #endif
>
>  #endif
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index e2b38c3551be..1dd1b36bdfea 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2508,6 +2508,49 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
>         return entry;
>  }
>
> +void sev_dump_rmpentry(u64 pfn)
> +{
> +       unsigned long pfn_end;
> +       struct rmpentry *e;
> +       int level;
> +
> +       e = __snp_lookup_rmpentry(pfn, &level);
> +       if (!e) {
if (IS_ERR(e)) {

> +               pr_info("failed to read RMP entry pfn 0x%llx\n", pfn);
> +               return;
> +       }
> +
> +       if (rmpentry_assigned(e)) {
> +               pr_info("RMPEntry paddr 0x%llx [assigned=%d immutable=%d pagesize=%d gpa=0x%lx"
> +                       " asid=%d vmsa=%d validated=%d]\n", pfn << PAGE_SHIFT,
> +                       rmpentry_assigned(e), e->info.immutable, rmpentry_pagesize(e),
> +                       (unsigned long)e->info.gpa, e->info.asid, e->info.vmsa,
> +                       e->info.validated);
> +               return;
> +       }
> +
> +       /*
> +        * If the RMP entry at the faulting pfn was not assigned, then it is
> +        * not clear what caused the RMP violation. To get some useful debug
> +        * information, iterate through the entire 2MB region and dump the
> +        * RMP entries that have any bit set.
> +        */
> +       pfn = pfn & ~(PTRS_PER_PMD - 1);
> +       pfn_end = pfn + PTRS_PER_PMD;
> +
> +       while (pfn < pfn_end) {
> +               e = __snp_lookup_rmpentry(pfn, &level);
> +               if (!e)
> +                       return;
if (IS_ERR(e))
      continue;
> +
> +               if (e->low || e->high)
> +                       pr_info("RMPEntry paddr 0x%llx: [high=0x%016llx low=0x%016llx]\n",
> +                               pfn << PAGE_SHIFT, e->high, e->low);
> +               pfn++;
> +       }
> +}
> +EXPORT_SYMBOL_GPL(sev_dump_rmpentry);
> +
>  /*
>   * Return 1 if the RMP entry is assigned, 0 if it exists but is not assigned,
>   * and -errno if there is no corresponding RMP entry.
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index ded53879f98d..f2b16dcfbd9a 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -536,6 +536,8 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
>  static void
>  show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
>  {
> +       unsigned long pfn;
> +
>         if (!oops_may_print())
>                 return;
>
> @@ -608,7 +610,10 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
>                 show_ldttss(&gdt, "TR", tr);
>         }
>
> -       dump_pagetable(address);
> +       pfn = dump_pagetable(address);
> +
> +       if (error_code & X86_PF_RMP)
> +               sev_dump_rmpentry(pfn);
>  }
>
>  static noinline void
> --
> 2.25.1
>
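
Why the posted "if (!e)" check misses the error case: __snp_lookup_rmpentry() returns an ERR_PTR()-encoded pointer on failure, which is non-NULL, so a NULL check never fires. A minimal userspace sketch modeling the kernel's ERR_PTR()/IS_ERR() helpers (an illustration, not kernel code):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace stand-ins for the kernel helpers of the same name. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *e = ERR_PTR(-19);	/* -ENODEV: an error-encoded, non-NULL pointer */

	printf("!e     -> %d\n", !e);         /* prints 0: NULL check misses it */
	printf("IS_ERR -> %d\n", IS_ERR(e));  /* prints 1: error is detected */
	return 0;
}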
Kalra, Ashish Jan. 13, 2023, 11:49 p.m. UTC | #2
On 1/13/2023 4:56 PM, Alper Gun wrote:
> On Wed, Dec 14, 2022 at 11:52 AM Michael Roth <michael.roth@amd.com> wrote:
>>
>> From: Brijesh Singh <brijesh.singh@amd.com>
>>
>> When SEV-SNP is enabled globally, writes from the host are subject to an
>> RMP check. If the hardware detects a check failure, it raises a #PF with
>> the RMP bit set in the error code. Dump the RMP entry at the faulting
>> pfn to aid debugging.
>>
>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
>> Signed-off-by: Michael Roth <michael.roth@amd.com>
>> ---
>>   arch/x86/include/asm/sev.h |  2 ++
>>   arch/x86/kernel/sev.c      | 43 ++++++++++++++++++++++++++++++++++++++
>>   arch/x86/mm/fault.c        |  7 ++++++-
>>   3 files changed, 51 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
>> index 4eeedcaca593..2916f4150ac7 100644
>> --- a/arch/x86/include/asm/sev.h
>> +++ b/arch/x86/include/asm/sev.h
>> @@ -215,6 +215,7 @@ int snp_lookup_rmpentry(u64 pfn, int *level);
>>   int psmash(u64 pfn);
>>   int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable);
>>   int rmp_make_shared(u64 pfn, enum pg_level level);
>> +void sev_dump_rmpentry(u64 pfn);
>>   #else
>>   static inline void sev_es_ist_enter(struct pt_regs *regs) { }
>>   static inline void sev_es_ist_exit(void) { }
>> @@ -247,6 +248,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int as
>>          return -ENODEV;
>>   }
>>   static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
>> +static inline void sev_dump_rmpentry(u64 pfn) {}
>>   #endif
>>
>>   #endif
>> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
>> index e2b38c3551be..1dd1b36bdfea 100644
>> --- a/arch/x86/kernel/sev.c
>> +++ b/arch/x86/kernel/sev.c
>> @@ -2508,6 +2508,49 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
>>          return entry;
>>   }
>>
>> +void sev_dump_rmpentry(u64 pfn)
>> +{
>> +       unsigned long pfn_end;
>> +       struct rmpentry *e;
>> +       int level;
>> +
>> +       e = __snp_lookup_rmpentry(pfn, &level);
>> +       if (!e) {
> if (IS_ERR(e)) {
> 

Yes, this needs to be fixed to IS_ERR(e)

>> +               pr_info("failed to read RMP entry pfn 0x%llx\n", pfn);
>> +               return;
>> +       }
>> +
>> +       if (rmpentry_assigned(e)) {
>> +               pr_info("RMPEntry paddr 0x%llx [assigned=%d immutable=%d pagesize=%d gpa=0x%lx"
>> +                       " asid=%d vmsa=%d validated=%d]\n", pfn << PAGE_SHIFT,
>> +                       rmpentry_assigned(e), e->info.immutable, rmpentry_pagesize(e),
>> +                       (unsigned long)e->info.gpa, e->info.asid, e->info.vmsa,
>> +                       e->info.validated);
>> +               return;
>> +       }
>> +
>> +       /*
>> +        * If the RMP entry at the faulting pfn was not assigned, then it is
>> +        * not clear what caused the RMP violation. To get some useful debug
>> +        * information, iterate through the entire 2MB region and dump the
>> +        * RMP entries that have any bit set.
>> +        */
>> +       pfn = pfn & ~(PTRS_PER_PMD - 1);
>> +       pfn_end = pfn + PTRS_PER_PMD;
>> +
>> +       while (pfn < pfn_end) {
>> +               e = __snp_lookup_rmpentry(pfn, &level);
>> +               if (!e)
>> +                       return;
> if (IS_ERR(e))
>        continue;

Again, this is correct, but then it should be:

if (IS_ERR(e)) {
	pfn++;
	continue;
}

Thanks,
Ashish

>> +
>> +               if (e->low || e->high)
>> +                       pr_info("RMPEntry paddr 0x%llx: [high=0x%016llx low=0x%016llx]\n",
>> +                               pfn << PAGE_SHIFT, e->high, e->low);
>> +               pfn++;
>> +       }
>> +}
>> +EXPORT_SYMBOL_GPL(sev_dump_rmpentry);
>> +
>>   /*
>>    * Return 1 if the RMP entry is assigned, 0 if it exists but is not assigned,
>>    * and -errno if there is no corresponding RMP entry.
>> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
>> index ded53879f98d..f2b16dcfbd9a 100644
>> --- a/arch/x86/mm/fault.c
>> +++ b/arch/x86/mm/fault.c
>> @@ -536,6 +536,8 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
>>   static void
>>   show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
>>   {
>> +       unsigned long pfn;
>> +
>>          if (!oops_may_print())
>>                  return;
>>
>> @@ -608,7 +610,10 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
>>                  show_ldttss(&gdt, "TR", tr);
>>          }
>>
>> -       dump_pagetable(address);
>> +       pfn = dump_pagetable(address);
>> +
>> +       if (error_code & X86_PF_RMP)
>> +               sev_dump_rmpentry(pfn);
>>   }
>>
>>   static noinline void
>> --
>> 2.25.1
>>
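
Folding both review fixes into the posted function gives error handling along these lines (a sketch of the corrected logic per the discussion above, not code that was posted):

	e = __snp_lookup_rmpentry(pfn, &level);
	if (IS_ERR(e)) {
		pr_info("failed to read RMP entry pfn 0x%llx\n", pfn);
		return;
	}

	...

	while (pfn < pfn_end) {
		e = __snp_lookup_rmpentry(pfn, &level);
		if (IS_ERR(e)) {
			/* Skip unreadable entries rather than aborting the dump. */
			pfn++;
			continue;
		}

		if (e->low || e->high)
			pr_info("RMPEntry paddr 0x%llx: [high=0x%016llx low=0x%016llx]\n",
				pfn << PAGE_SHIFT, e->high, e->low);
		pfn++;
	}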

Patch

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 4eeedcaca593..2916f4150ac7 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -215,6 +215,7 @@ int snp_lookup_rmpentry(u64 pfn, int *level);
 int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
+void sev_dump_rmpentry(u64 pfn);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
@@ -247,6 +248,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int as
 	return -ENODEV;
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
+static inline void sev_dump_rmpentry(u64 pfn) {}
 #endif
 
 #endif
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index e2b38c3551be..1dd1b36bdfea 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -2508,6 +2508,49 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
 	return entry;
 }
 
+void sev_dump_rmpentry(u64 pfn)
+{
+	unsigned long pfn_end;
+	struct rmpentry *e;
+	int level;
+
+	e = __snp_lookup_rmpentry(pfn, &level);
+	if (!e) {
+		pr_info("failed to read RMP entry pfn 0x%llx\n", pfn);
+		return;
+	}
+
+	if (rmpentry_assigned(e)) {
+		pr_info("RMPEntry paddr 0x%llx [assigned=%d immutable=%d pagesize=%d gpa=0x%lx"
+			" asid=%d vmsa=%d validated=%d]\n", pfn << PAGE_SHIFT,
+			rmpentry_assigned(e), e->info.immutable, rmpentry_pagesize(e),
+			(unsigned long)e->info.gpa, e->info.asid, e->info.vmsa,
+			e->info.validated);
+		return;
+	}
+
+	/*
+	 * If the RMP entry at the faulting pfn was not assigned, then it is
+	 * not clear what caused the RMP violation. To get some useful debug
+	 * information, iterate through the entire 2MB region and dump the
+	 * RMP entries that have any bit set.
+	 */
+	pfn = pfn & ~(PTRS_PER_PMD - 1);
+	pfn_end = pfn + PTRS_PER_PMD;
+
+	while (pfn < pfn_end) {
+		e = __snp_lookup_rmpentry(pfn, &level);
+		if (!e)
+			return;
+
+		if (e->low || e->high)
+			pr_info("RMPEntry paddr 0x%llx: [high=0x%016llx low=0x%016llx]\n",
+				pfn << PAGE_SHIFT, e->high, e->low);
+		pfn++;
+	}
+}
+EXPORT_SYMBOL_GPL(sev_dump_rmpentry);
+
 /*
  * Return 1 if the RMP entry is assigned, 0 if it exists but is not assigned,
  * and -errno if there is no corresponding RMP entry.
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ded53879f98d..f2b16dcfbd9a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -536,6 +536,8 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
+	unsigned long pfn;
+
 	if (!oops_may_print())
 		return;
 
@@ -608,7 +610,10 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
 		show_ldttss(&gdt, "TR", tr);
 	}
 
-	dump_pagetable(address);
+	pfn = dump_pagetable(address);
+
+	if (error_code & X86_PF_RMP)
+		sev_dump_rmpentry(pfn);
 }
 
 static noinline void
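
As a side note on the 2MB sweep in sev_dump_rmpentry(): PTRS_PER_PMD is 512 on x86-64, so masking off the low nine bits of the pfn yields the first pfn of the enclosing 2MB region. A standalone sketch of that arithmetic with a hypothetical faulting pfn:

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x12345;               /* hypothetical faulting pfn */
	unsigned long start = pfn & ~(512UL - 1);  /* PTRS_PER_PMD == 512 on x86-64 */
	unsigned long end = start + 512;

	/* 512 pfns of 4KB pages = 2MB; prints "dump pfns 0x12200..0x123ff" */
	printf("dump pfns 0x%lx..0x%lx\n", start, end - 1);
	return 0;
}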