--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -207,6 +207,7 @@ struct snp_psc_desc {
#define GHCB_TERM_SVSM_VMPL0 8 /* SVSM is present but has set VMPL to 0 */
#define GHCB_TERM_SVSM_CAA 9 /* SVSM is present but CAA is not page aligned */
#define GHCB_TERM_SECURE_TSC 10 /* Secure TSC initialization failed */
+#define GHCB_TERM_SECURE_TSC_KVMCLOCK 11 /* KVM clock selected instead of Secure TSC */
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
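For context (not part of the diff): the two arguments later passed to sev_es_terminate() are packed into the GHCB MSR termination request using the reason-set/reason-code macros that already live in this header. A rough illustration using those existing sev-common.h definitions:

  u64 val;

  /*
   * GHCB MSR protocol: request termination, reporting reason set 1 (Linux)
   * together with the new reason code added above.
   */
  val = GHCB_MSR_TERM_REQ |
	GHCB_SEV_TERM_REASON(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC_KVMCLOCK);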
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -494,6 +494,7 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
+void __noreturn sev_es_terminate(unsigned int set, unsigned int reason);
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -538,6 +539,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_
struct snp_guest_request_ioctl *rio) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
+static inline void sev_es_terminate(unsigned int set, unsigned int reason) { }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
--- a/arch/x86/coco/sev/shared.c
+++ b/arch/x86/coco/sev/shared.c
@@ -117,8 +117,7 @@ static bool __init sev_es_check_cpu_features(void)
return true;
}
-static void __head __noreturn
-sev_es_terminate(unsigned int set, unsigned int reason)
+void __head __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
{
u64 val = GHCB_MSR_TERM_REQ;
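The hunk above only changes the linkage; the body of sev_es_terminate() is untouched. For orientation, the function roughly continues as sketched below (reproduced approximately, not part of this diff): it merges the reason into val, writes the GHCB MSR, issues VMGEXIT so the hypervisor tears the guest down, and spins in HLT in case control ever comes back, which is why the new declaration in sev.h keeps the __noreturn annotation.

  /* Sketch of the unchanged remainder, for orientation only */
  val |= GHCB_SEV_TERM_REASON(set, reason);

  sev_es_wr_ghcb_msr(val);	/* publish the termination request */
  VMGEXIT();			/* let the hypervisor act on it */

  while (true)
	  asm volatile("hlt\n" : : : "memory");	/* never return */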
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -21,6 +21,7 @@
#include <asm/hypervisor.h>
#include <asm/x86_init.h>
#include <asm/kvmclock.h>
+#include <asm/sev.h>
static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1;
@@ -150,6 +151,14 @@ bool kvm_check_and_clear_guest_paused(void)
static int kvm_cs_enable(struct clocksource *cs)
{
+ /*
+ * For a guest with SecureTSC enabled, the TSC should be the only clock
+ * source. Abort the guest when kvmclock is selected as the clock
+ * source.
+ */
+ if (WARN_ON(cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)))
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC_KVMCLOCK);
+
vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
return 0;
}
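Note on placement (not part of the diff): kvm_cs_enable() is the ->enable callback of the kvmclock clocksource, so the check fires only when the timekeeping core actually switches to kvmclock, not when the clocksource is merely registered. For reference, the pre-existing registration looks approximately like this:

  static struct clocksource kvm_clock = {
	  .name	  = "kvm-clock",
	  .read	  = kvm_clock_get_cycles,
	  .rating = 400,
	  .mask	  = CLOCKSOURCE_MASK(64),
	  .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
	  .enable = kvm_cs_enable,
  };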
SecureTSC-enabled guests should use the TSC as their only clock source.
Terminate the guest with an appropriate reason code if the clock source
ever switches to the hypervisor-controlled kvmclock.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
 arch/x86/include/asm/sev-common.h | 1 +
 arch/x86/include/asm/sev.h        | 2 ++
 arch/x86/coco/sev/shared.c        | 3 +--
 arch/x86/kernel/kvmclock.c        | 9 +++++++++
 4 files changed, 13 insertions(+), 2 deletions(-)