@@ -23,6 +23,9 @@ struct real_mode_header {
u32 trampoline_header;
#ifdef CONFIG_AMD_MEM_ENCRYPT
u32 sev_es_trampoline_start;
+ u32 sev_ap_park;
+ u32 sev_ap_park_seg;
+ u32 sev_ap_park_gdt;
#endif
#ifdef CONFIG_X86_64
u32 trampoline_start64;
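A sketch for illustration (not from the patch): the parking code added below consumes sev_ap_park and sev_ap_park_seg together as a single far-jump operand, so the two fields must stay adjacent and in this order, while sev_ap_park_gdt holds the physical address of the parking GDT that is patched at runtime. A hypothetical compile-time check over an illustrative excerpt of the layout:

#include <stddef.h>
#include <stdint.h>

/* Illustrative excerpt only; not the real struct real_mode_header. */
struct rm_header_excerpt {
	uint32_t sev_es_trampoline_start;
	uint32_t sev_ap_park;		/* physical address of sev_ap_park_asm */
	uint32_t sev_ap_park_seg;	/* code selector used by the far jump  */
	uint32_t sev_ap_park_gdt;	/* physical address of sev_ap_park_gdt */
};

/* The indirect far jump in sev_jumptable_ap_park() relies on this adjacency. */
_Static_assert(offsetof(struct rm_header_excerpt, sev_ap_park_seg) ==
	       offsetof(struct rm_header_excerpt, sev_ap_park) + sizeof(uint32_t),
	       "sev_ap_park_seg must immediately follow sev_ap_park");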
@@ -35,6 +35,7 @@
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
+#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
@@ -1147,8 +1148,9 @@ void __init snp_set_wakeup_secondary_cpu(void)
void __init sev_es_setup_ap_jump_table_data(void *base, u32 pa)
{
struct sev_ap_jump_table_header *header;
+ u64 *ap_jumptable_gdt, *sev_ap_park_gdt;
struct desc_ptr *gdt_descr;
- u64 *ap_jumptable_gdt;
+ int idx;
header = base;
@@ -1158,9 +1160,16 @@ void __init sev_es_setup_ap_jump_table_data(void *base, u32 pa)
* real-mode.
*/
ap_jumptable_gdt = (u64 *)(base + header->ap_jumptable_gdt);
- ap_jumptable_gdt[SEV_APJT_CS16 / 8] = GDT_ENTRY(0x9b, pa, 0xffff);
- ap_jumptable_gdt[SEV_APJT_DS16 / 8] = GDT_ENTRY(0x93, pa, 0xffff);
- ap_jumptable_gdt[SEV_RM_DS / 8] = GDT_ENTRY(0x93, 0, 0xffff);
+ sev_ap_park_gdt = __va(real_mode_header->sev_ap_park_gdt);
+
+ idx = SEV_APJT_CS16 / 8;
+ ap_jumptable_gdt[idx] = sev_ap_park_gdt[idx] = GDT_ENTRY(0x9b, pa, 0xffff);
+
+ idx = SEV_APJT_DS16 / 8;
+ ap_jumptable_gdt[idx] = sev_ap_park_gdt[idx] = GDT_ENTRY(0x93, pa, 0xffff);
+
+ idx = SEV_RM_DS / 8;
+ ap_jumptable_gdt[idx] = GDT_ENTRY(0x93, 0x0, 0xffff);
/* Write correct GDT base address into GDT descriptor */
gdt_descr = (struct desc_ptr *)(base + header->ap_jumptable_gdt);
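For reference, a standalone sketch of what the GDT_ENTRY() values above encode; the helper mirrors the usual x86 descriptor packing and is an illustration, not code taken from the patch:

#include <stdint.h>

/*
 * Descriptor packing sketch: access byte in bits 40-47, flags nibble
 * (G, D/B, L, AVL) in bits 52-55, base and limit split across the
 * remaining bit fields.
 */
static uint64_t gdt_entry_sketch(uint16_t flags, uint32_t base, uint32_t limit)
{
	return ((uint64_t)(base  & 0xff000000) << (56 - 24)) |
	       ((uint64_t)(flags & 0x0000f0ff) << 40)        |
	       ((uint64_t)(limit & 0x000f0000) << (48 - 16)) |
	       ((uint64_t)(base  & 0x00ffffff) << 16)        |
	        (uint64_t)(limit & 0x0000ffff);
}

/*
 * gdt_entry_sketch(0x9b, pa, 0xffff): present, DPL 0, accessed,
 * executable/readable code, D/B = 0 (16-bit), byte granularity,
 * base = pa, limit = 64 KiB. 0x93 is the matching writable data
 * segment, so segment offset 0 lands on the AP jump table itself.
 */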
@@ -1349,6 +1358,38 @@ void setup_ghcb(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+void __noreturn sev_jumptable_ap_park(void)
+{
+ local_irq_disable();
+
+ write_cr3(real_mode_header->trampoline_pgd);
+
+ /* Exiting long mode will fail if CR4.PCIDE is set. */
+ if (cpu_feature_enabled(X86_FEATURE_PCID))
+ cr4_clear_bits(X86_CR4_PCIDE);
+
+ /*
+ * Set all GPRs except EAX, EBX, ECX, and EDX to reset state to prepare
+ * for software reset.
+ */
+ asm volatile("xorl %%r15d, %%r15d\n"
+ "xorl %%r14d, %%r14d\n"
+ "xorl %%r13d, %%r13d\n"
+ "xorl %%r12d, %%r12d\n"
+ "xorl %%r11d, %%r11d\n"
+ "xorl %%r10d, %%r10d\n"
+ "xorl %%r9d, %%r9d\n"
+ "xorl %%r8d, %%r8d\n"
+ "xorl %%esi, %%esi\n"
+ "xorl %%edi, %%edi\n"
+ "xorl %%esp, %%esp\n"
+ "xorl %%ebp, %%ebp\n"
+ "ljmpl *%0" : :
+ "m" (real_mode_header->sev_ap_park));
+ unreachable();
+}
+STACK_FRAME_NON_STANDARD(sev_jumptable_ap_park);
+
static void sev_es_ap_hlt_loop(void)
{
struct ghcb_state state;
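A note on the closing "ljmpl *%0" above: with an "m" operand it performs an indirect far jump through a 6-byte m16:32 pointer in memory, 32-bit offset first, 16-bit code selector second. That is why sev_ap_park (holding pa_sev_ap_park_asm) must be immediately followed by sev_ap_park_seg (holding __KERNEL32_CS in its low 16 bits). A minimal self-contained sketch of the pattern, with illustrative names:

#include <stdint.h>

/* The six bytes read by an indirect "ljmpl *mem": offset, then selector. */
struct __attribute__((packed)) far_ptr32 {
	uint32_t offset;	/* e.g. pa_sev_ap_park_asm           */
	uint16_t selector;	/* e.g. low 16 bits of __KERNEL32_CS */
};

static inline void __attribute__((noreturn)) far_jump32(const struct far_ptr32 *fp)
{
	/* Same constraint as in the patch: the far pointer is a memory operand. */
	asm volatile("ljmpl *%0" : : "m" (*fp));
	__builtin_unreachable();
}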
@@ -1385,8 +1426,10 @@ static void sev_es_play_dead(void)
play_dead_common();
/* IRQs now disabled */
-
- sev_es_ap_hlt_loop();
+ if (sev_ap_jumptable_blob_installed)
+ sev_jumptable_ap_park();
+ else
+ sev_es_ap_hlt_loop();
/*
* If we get here, the VCPU was woken up again. Jump to CPU
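sev_es_play_dead() now prefers parking on the AP jump table and only falls back to the HLT loop when no jump table blob is available. The sev_ap_jumptable_blob_installed flag is defined and set elsewhere in this series; purely as a sketch of the intended shape (the helper name below is hypothetical and the real assignment lives wherever the blob is installed):

/* Sketch only: set once the AP jump table blob has been copied and its
 * GDT entries patched; not the series' actual definition. */
static bool sev_ap_jumptable_blob_installed __ro_after_init;

static void __init sev_install_ap_jumptable_blob(void *jt_va, u32 jt_pa)
{
	sev_es_setup_ap_jump_table_data(jt_va, jt_pa);
	sev_ap_jumptable_blob_installed = true;
}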
@@ -19,11 +19,12 @@ wakeup-objs += video-vga.o
wakeup-objs += video-vesa.o
wakeup-objs += video-bios.o
-realmode-y += header.o
-realmode-y += trampoline_$(BITS).o
-realmode-y += stack.o
-realmode-y += reboot.o
-realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
+realmode-y += header.o
+realmode-y += trampoline_$(BITS).o
+realmode-y += stack.o
+realmode-y += reboot.o
+realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
+realmode-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
targets += $(realmode-y)
@@ -22,6 +22,9 @@ SYM_DATA_START(real_mode_header)
.long pa_trampoline_header
#ifdef CONFIG_AMD_MEM_ENCRYPT
.long pa_sev_es_trampoline_start
+ .long pa_sev_ap_park_asm
+ .long __KERNEL32_CS
+ .long pa_sev_ap_park_gdt
#endif
#ifdef CONFIG_X86_64
.long pa_trampoline_start64
new file mode 100644
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/msr-index.h>
+#include <asm/sev-ap-jumptable.h>
+#include "realmode.h"
+
+ .section ".text32", "ax"
+ .code32
+/*
+ * The following code drops out of long mode, switches to 16-bit protected
+ * mode and sets up the execution environment for the AP jump table blob.
+ * It then jumps into the AP jump table to park the AP.
+ *
+ * The code was copied from reboot.S and modified to fit the SEV-ES requirements
+ * for AP parking. When this code is entered, all registers except %EAX-%EDX are
+ * in reset state.
+ *
+ * %EAX-%EDX and EFLAGS are undefined. Only use registers %EAX-%EDX and %ESP
+ * in this code.
+ */
+SYM_CODE_START(sev_ap_park_asm)
+
+ /* Switch to trampoline GDT as it is guaranteed < 4 GiB */
+ movl $__KERNEL_DS, %eax
+ movl %eax, %ds
+ lgdt pa_tr_gdt
+
+ /* Disable paging to drop us out of long mode */
+ movl %cr0, %eax
+ btcl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ ljmpl $__KERNEL32_CS, $pa_sev_ap_park_paging_off
+
+SYM_INNER_LABEL(sev_ap_park_paging_off, SYM_L_GLOBAL)
+ /* Clear EFER */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $MSR_EFER, %ecx
+ wrmsr
+
+ /* Clear CR3 */
+ xorl %ecx, %ecx
+ movl %ecx, %cr3
+
+ /* Set up the IDT for real mode. */
+ lidtl pa_machine_real_restart_idt
+
+ /* Load the GDT with the 16-bit segments for the AP jump table */
+ lgdtl pa_sev_ap_park_gdt
+
+ /* Set up the data and stack segments for the AP jump table */
+ movw $SEV_APJT_DS16, %ax
+ movw %ax, %ds
+ movw %ax, %ss
+
+ /* Far jump into the AP jump table, entering 16-bit protected mode */
+ ljmpw $SEV_APJT_CS16, $SEV_APJT_ENTRY
+SYM_CODE_END(sev_ap_park_asm)
+
+ .data
+ .balign 16
+SYM_DATA_START(sev_ap_park_gdt)
+ /* Self-pointer */
+ .word sev_ap_park_gdt_end - sev_ap_park_gdt - 1
+ .long pa_sev_ap_park_gdt
+ .word 0
+
+ /*
+ * Offset 0x8
+ * 16-bit code segment descriptor pointing to the AP jump table base.
+ * Set up at runtime in sev_es_setup_ap_jump_table_data().
+ */
+ .quad 0
+
+ /*
+ * Offset 0x10
+ * 16-bit data segment descriptor pointing to the AP jump table base.
+ * Set up at runtime in sev_es_setup_ap_jump_table_data().
+ */
+ .quad 0
+SYM_DATA_END_LABEL(sev_ap_park_gdt, SYM_L_GLOBAL, sev_ap_park_gdt_end)
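One detail of sev_ap_park_gdt worth spelling out: the first 8-byte slot doubles as the pseudo-descriptor loaded by the lgdtl above (the null descriptor at selector 0 is never dereferenced by the CPU, so its slot is free to hold the limit word and base address), the same trick the trampoline GDT uses. A little-endian layout sketch in C, with illustrative names:

#include <stddef.h>
#include <stdint.h>

/* First GDT slot reused as the lgdtl operand: 2-byte limit + 4-byte base,
 * padded to 8 bytes so the next descriptor lands at offset 0x8. */
struct __attribute__((packed)) park_gdt_sketch {
	uint16_t limit;		/* sev_ap_park_gdt_end - sev_ap_park_gdt - 1 */
	uint32_t base;		/* pa_sev_ap_park_gdt                        */
	uint16_t pad;		/* .word 0                                   */
	uint64_t cs16;		/* patched at runtime with the 16-bit CS     */
	uint64_t ds16;		/* patched at runtime with the 16-bit DS     */
};

_Static_assert(offsetof(struct park_gdt_sketch, cs16) == 0x08,
	       "matches the SEV_APJT_CS16 selector / GDT offset");
_Static_assert(offsetof(struct park_gdt_sketch, ds16) == 0x10,
	       "matches the SEV_APJT_DS16 selector / GDT offset");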