@@ -70,6 +70,7 @@
/* GHCBData[63:12] */ \
(((u64)(v) & GENMASK_ULL(63, 12)) >> 12)
+#ifndef __ASSEMBLY__
/*
* SNP Page State Change Operation
*
@@ -160,6 +161,8 @@ struct snp_psc_desc {
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+#endif /* __ASSEMBLY__ */
+
/*
* Error codes related to GHCB input that can be communicated back to the guest
* by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
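
Stepping back, the GHCB MSR protocol values in this header all share one layout: a 12-bit request/response code in bits 11:0 and the payload in bits 63:12. A minimal C sketch of that split (the helper names are illustrative, not part of sev-common.h):

/* Illustrative helpers; only the bit layout comes from the header. */
static inline u64 ghcb_msr_compose(u64 code, u64 data)
{
        return (code & GENMASK_ULL(11, 0)) | (data << 12);
}

static inline u64 ghcb_msr_data(u64 v)
{
        /* Inverse of the GHCBData[63:12] accessor above */
        return (v & GENMASK_ULL(63, 12)) >> 12;
}
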
@@ -135,6 +135,10 @@ struct snp_secrets_page_layout {
#ifdef CONFIG_AMD_MEM_ENCRYPT
extern struct static_key_false sev_es_enable_key;
+static inline bool sev_es_active(void)
+{
+ return static_branch_unlikely(&sev_es_enable_key);
+}
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
@@ -198,6 +202,7 @@ bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
#else
+static inline bool sev_es_active(void) { return false; }
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
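
Because the #else branch provides a false-returning stub, callers of sev_es_active() need no ifdeffery of their own. A hypothetical caller (not from the patch; builds with or without CONFIG_AMD_MEM_ENCRYPT):

static void example_init(void)
{
        if (sev_es_active()) {
                /* SEV-ES guest: CPUID must go through the GHCB protocol */
        } else {
                /* Host or non-SEV-ES guest: plain CPUID works */
        }
}
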
@@ -204,7 +204,10 @@ extern unsigned int smpboot_control;
/* Control bits for startup_64 */
#define STARTUP_APICID_CPUID_0B 0x80000000
#define STARTUP_APICID_CPUID_01 0x40000000
+#define STARTUP_APICID_SEV_ES 0x20000000
-#define STARTUP_PARALLEL_MASK (STARTUP_APICID_CPUID_01 | STARTUP_APICID_CPUID_0B)
+#define STARTUP_PARALLEL_MASK (STARTUP_APICID_CPUID_01 | \
+ STARTUP_APICID_CPUID_0B | \
+ STARTUP_APICID_SEV_ES)
#endif /* _ASM_X86_SMP_H */
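
For illustration, the decode the AP performs on smpboot_control in head_64.S (next hunks) amounts to the following C; the enum and function are made up for this sketch:

/* Illustrative decode of smpboot_control; not a kernel function. */
enum apicid_source { APICID_CPUID_0B, APICID_CPUID_01, APICID_SEV_ES, APICID_NONE };

static enum apicid_source startup_apicid_source(unsigned int ctrl)
{
        if (ctrl & STARTUP_APICID_CPUID_0B)
                return APICID_CPUID_0B;         /* CPUID leaf 0x0b, EDX */
        if (ctrl & STARTUP_APICID_CPUID_01)
                return APICID_CPUID_01;         /* CPUID leaf 0x01, EBX[31:24] */
        if (ctrl & STARTUP_APICID_SEV_ES)
                return APICID_SEV_ES;           /* leaf 0x0b via GHCB MSR protocol */
        return APICID_NONE;                     /* low bits carry the CPU number */
}
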
@@ -26,6 +26,7 @@
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
+#include <asm/sev-common.h>
/*
* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
@@ -242,6 +243,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
*
* Bit 31 STARTUP_APICID_CPUID_0B flag (use CPUID 0x0b)
* Bit 30 STARTUP_APICID_CPUID_01 flag (use CPUID 0x01)
+ * Bit 29 STARTUP_APICID_SEV_ES flag (use CPUID 0x0b via GHCB MSR protocol)
- * Bit 0-24 CPU# if STARTUP_APICID_CPUID_xx flags are not set
+ * Bit 0-24 CPU# if no STARTUP_APICID_* flag is set
*/
movl smpboot_control(%rip), %ecx
@@ -249,6 +251,10 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
jnz .Luse_cpuid_0b
testl $STARTUP_APICID_CPUID_01, %ecx
jnz .Luse_cpuid_01
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ testl $STARTUP_APICID_SEV_ES, %ecx
+ jnz .Luse_sev_cpuid_0b
+#endif
andl $0x0FFFFFFF, %ecx
jmp .Lsetup_cpu
@@ -259,6 +265,30 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
shr $24, %edx
jmp .Lsetup_AP
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+.Luse_sev_cpuid_0b:
+ /* Set the GHCB MSR to request CPUID 0xB_EDX */
+ movl $MSR_AMD64_SEV_ES_GHCB, %ecx
+ movl $(GHCB_CPUID_REQ_EDX << GHCB_MSR_CPUID_REG_POS) | GHCB_MSR_CPUID_REQ, %eax
+ movl $0x0B, %edx /* CPUID function in GHCBData[63:32] */
+ wrmsr
+
+ /* Perform GHCB MSR protocol */
+ rep; vmmcall /* vmgexit */
+
+ /*
+ * Get the result. After the RDMSR:
+ * EAX should be 0xc0000005 (EDX echoed in bits 31:30, resp code 0x005 in bits 11:0)
+ * EDX should have the CPUID register value; since EDX is the
+ * target register, no need to move the result.
+ */
+ rdmsr
+ andl $GHCB_MSR_INFO_MASK, %eax
+ cmpl $GHCB_MSR_CPUID_RESP, %eax
+ jne 1f /* not a CPUID response: error out */
+ jmp .Lsetup_AP
+#endif
+
.Luse_cpuid_0b:
mov $0x0B, %eax
xorl %ecx, %ecx
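
For readers who prefer C, the .Luse_sev_cpuid_0b sequence above corresponds roughly to the following sketch, modeled on the MSR-protocol helpers in arch/x86/kernel/sev.c (the function itself is illustrative, not kernel code):

static int sev_cpuid_0b_edx(u32 *edx)
{
        u64 val;

        /* Request register EDX (3) of CPUID leaf 0x0B */
        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(0x0B, GHCB_CPUID_REQ_EDX));
        VMGEXIT();                              /* rep; vmmcall */
        val = sev_es_rd_ghcb_msr();

        if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
                return -EIO;                    /* hypervisor didn't answer */

        *edx = val >> 32;                       /* GHCBData[63:32]: register value */
        return 0;
}
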
@@ -1515,15 +1515,29 @@ void __init smp_prepare_cpus_common(void)
* We can do 64-bit AP bringup in parallel if the CPU reports its APIC
* ID in CPUID (either leaf 0x0B if we need the full APIC ID in X2APIC
* mode, or leaf 0x01 if 8 bits are sufficient). Otherwise it's too
- * hard. And not for SEV-ES guests because they can't use CPUID that
- * early.
+ * hard.
*/
static bool prepare_parallel_bringup(void)
{
- if (IS_ENABLED(CONFIG_X86_32) || cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+ bool has_sev_es = sev_es_active();
+
+ if (IS_ENABLED(CONFIG_X86_32))
return false;
- if (x2apic_mode) {
+ /*
+ * Encrypted guests other than SEV-ES (in the future) will need to
+ * implement an early way of finding the APIC ID, since they will
+ * presumably block direct CPUID too. Be kind to our future selves
+ * by warning here instead of just letting them break. Parallel
+ * startup doesn't have to be in the first round of enabling patches
+ * for any such technology.
+ */
+ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT) && !has_sev_es) {
+ pr_info("Disabling parallel bringup due to guest memory encryption\n");
+ return false;
+ }
+
+ if (x2apic_mode || has_sev_es) {
if (boot_cpu_data.cpuid_level < 0x0b)
return false;
@@ -1532,8 +1546,13 @@ static bool prepare_parallel_bringup(void)
return false;
}
- pr_debug("Using CPUID 0xb for parallel CPU startup\n");
- smpboot_control = STARTUP_APICID_CPUID_0B;
+ if (has_sev_es) {
+ pr_debug("Using SEV-ES CPUID 0xb for parallel CPU startup\n");
+ smpboot_control = STARTUP_APICID_SEV_ES;
+ } else {
+ pr_debug("Using CPUID 0xb for parallel CPU startup\n");
+ smpboot_control = STARTUP_APICID_CPUID_0B;
+ }
} else {
/* Without X2APIC, what's in CPUID 0x01 should suffice. */
if (boot_cpu_data.cpuid_level < 0x01)
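
Assuming the cpuid_level checks pass and nothing disabled parallel bringup earlier, the selection above reduces to this sketch (the function name is made up; it is not in the patch):

static unsigned int pick_smpboot_control(bool sev_es, bool x2apic)
{
        if (sev_es)
                return STARTUP_APICID_SEV_ES;   /* leaf 0x0b via GHCB MSR */
        if (x2apic)
                return STARTUP_APICID_CPUID_0B; /* full 32-bit APIC ID */
        return STARTUP_APICID_CPUID_01;         /* 8-bit APIC ID suffices */
}
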