@@ -163,6 +163,7 @@ struct snp_psc_desc {
#define GHCB_TERM_NOT_VMPL0 3 /* SNP guest is not running at VMPL-0 */
#define GHCB_TERM_CPUID 4 /* CPUID-validation failure */
#define GHCB_TERM_CPUID_HV 5 /* CPUID failure during hypervisor fallback */
+#define GHCB_TERM_SECURE_TSC 6 /* Secure TSC initialization failed */
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
@@ -173,6 +173,9 @@ enum msg_type {
SNP_MSG_VMRK_REQ,
SNP_MSG_VMRK_RSP,
+ SNP_MSG_TSC_INFO_REQ = 17,
+ SNP_MSG_TSC_INFO_RSP,
+
SNP_MSG_TYPE_MAX
};
@@ -204,6 +207,22 @@ struct snp_guest_msg {
#define SNP_GUEST_MSG_SIZE 4096
#define SNP_GUEST_MSG_PAYLOAD_SIZE (SNP_GUEST_MSG_SIZE - sizeof(struct snp_guest_msg))
+#define SNP_TSC_INFO_REQ_SZ 128
+#define SNP_TSC_INFO_RESP_SZ 128
+
+struct snp_tsc_info_req {
+ u8 rsvd[SNP_TSC_INFO_REQ_SZ];
+} __packed;
+
+struct snp_tsc_info_resp {
+ u32 status;
+ u32 rsvd1;
+ u64 tsc_scale;
+ u64 tsc_offset;
+ u32 tsc_factor;
+ u8 rsvd2[100];
+} __packed;
+
struct snp_guest_dev {
struct device *dev;
struct miscdevice misc;
@@ -225,6 +244,7 @@ struct snp_guest_dev {
struct snp_report_req report;
struct snp_derived_key_req derived_key;
struct snp_ext_report_req ext_report;
+ struct snp_tsc_info_req tsc_info;
} req;
unsigned int vmpck_id;
};
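
For reference, the TSC_INFO request above is a fully reserved 128-byte message, and the response packs status, scale, offset and factor into the same fixed 128 bytes (4 + 4 + 8 + 8 + 4 + 100). A minimal standalone sketch of that size check, written against plain C11 with a hypothetical struct name rather than the kernel headers:

	#include <assert.h>
	#include <stdint.h>

	/* Hypothetical stand-in for struct snp_tsc_info_resp, size check only. */
	struct tsc_info_resp_layout {
		uint32_t status;
		uint32_t rsvd1;
		uint64_t tsc_scale;
		uint64_t tsc_offset;
		uint32_t tsc_factor;
		uint8_t  rsvd2[100];
	} __attribute__((packed));

	/* 4 + 4 + 8 + 8 + 4 + 100 == 128, i.e. SNP_TSC_INFO_RESP_SZ. */
	static_assert(sizeof(struct tsc_info_resp_layout) == 128,
		      "TSC_INFO response must fill the 128-byte payload");
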
@@ -362,6 +382,7 @@ static inline void *alloc_shared_pages(size_t sz)
return page_address(page);
}
+void __init snp_secure_tsc_prepare(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
@@ -395,6 +416,7 @@ static inline int snp_send_guest_request(struct snp_guest_dev *snp_dev, struct s
struct snp_guest_request_ioctl *rio) { return -EINVAL; }
static inline void free_shared_pages(void *buf, size_t sz) { }
static inline void *alloc_shared_pages(size_t sz) { return NULL; }
+static inline void __init snp_secure_tsc_prepare(void) { }
#endif
#ifdef CONFIG_KVM_AMD_SEV
@@ -410,7 +410,9 @@ struct sev_es_save_area {
u8 reserved_0x298[80];
u32 pkru;
u32 tsc_aux;
- u8 reserved_0x2f0[24];
+ u64 tsc_scale;
+ u64 tsc_offset;
+ u8 reserved_0x300[8];
u64 rcx;
u64 rdx;
u64 rbx;
@@ -542,7 +544,7 @@ static inline void __unused_size_checks(void)
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
- BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
+ BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x300);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
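
The save-area change simply carves the former 24-byte reserved_0x2f0 region into tsc_scale (8 bytes at 0x2f0), tsc_offset (8 bytes at 0x2f8) and an 8-byte remainder at 0x300, which is why the reserved-offset build check moves from 0x2f0 to 0x300. A small out-of-tree sketch of that arithmetic, using a hypothetical struct covering only the affected slice of the layout:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical slice of the VMSA starting at offset 0x2f0. */
	struct vmsa_tsc_slice {
		uint64_t tsc_scale;         /* was reserved_0x2f0[0..7]   */
		uint64_t tsc_offset;        /* was reserved_0x2f0[8..15]  */
		uint8_t  reserved_0x300[8]; /* was reserved_0x2f0[16..23] */
	} __attribute__((packed));

	/* The two new u64 fields plus the shrunken pad still cover 24 bytes. */
	static_assert(sizeof(struct vmsa_tsc_slice) == 24,
		      "0x2f0..0x307 keeps its size");
	static_assert(0x2f0 + offsetof(struct vmsa_tsc_slice, reserved_0x300) == 0x300,
		      "remaining pad starts at 0x300");
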
@@ -96,6 +96,10 @@ static u64 sev_hv_features __ro_after_init;
/* Secrets page physical address from the CC blob */
static u64 secrets_pa __ro_after_init;
+/* Secure TSC values read using the TSC_INFO SNP guest request */
+static u64 snp_tsc_scale __ro_after_init;
+static u64 snp_tsc_offset __ro_after_init;
+
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
struct ghcb ghcb_page;
@@ -1045,6 +1049,12 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
vmsa->vmpl = 0;
vmsa->sev_features = sev_status >> 2;
+ /* Set Secure TSC parameters */
+ if (cc_platform_has(CC_ATTR_GUEST_SECURE_TSC)) {
+ vmsa->tsc_scale = snp_tsc_scale;
+ vmsa->tsc_offset = snp_tsc_offset;
+ }
+
/* Switch the page over to a VMSA page now that it is initialized */
ret = snp_set_vmsa(vmsa, true);
if (ret) {
@@ -2660,3 +2670,83 @@ void snp_guest_messaging_exit(struct snp_guest_dev *snp_dev)
iounmap(snp_dev->secrets);
}
EXPORT_SYMBOL_GPL(snp_guest_messaging_exit);
+
+static struct snp_guest_dev tsc_snp_dev __initdata;
+
+static int __init snp_get_tsc_info(void)
+{
+ struct snp_tsc_info_req *tsc_req = &tsc_snp_dev.req.tsc_info;
+ static u8 buf[SNP_TSC_INFO_RESP_SZ + AUTHTAG_LEN];
+ struct snp_guest_request_ioctl rio;
+ struct snp_tsc_info_resp tsc_resp;
+ struct snp_guest_req req;
+ int rc;
+
+ /*
+ * The intermediate response buffer is used while decrypting the
+ * response payload. Make sure that it has enough space to cover the
+ * authtag.
+ */
+ BUILD_BUG_ON(sizeof(buf) < (sizeof(tsc_resp) + AUTHTAG_LEN));
+
+ if (!snp_assign_vmpck(&tsc_snp_dev, 0))
+ return -EINVAL;
+
+ rc = snp_guest_messaging_init(&tsc_snp_dev);
+ if (rc)
+ return rc;
+
+ memset(tsc_req, 0, sizeof(*tsc_req));
+ memset(&req, 0, sizeof(req));
+ memset(&rio, 0, sizeof(rio));
+ memset(buf, 0, sizeof(buf));
+
+ req.msg_version = MSG_HDR_VER;
+ req.msg_type = SNP_MSG_TSC_INFO_REQ;
+ req.vmpck_id = tsc_snp_dev.vmpck_id;
+ req.req_buf = tsc_req;
+ req.req_sz = sizeof(*tsc_req);
+ req.resp_buf = buf;
+ req.resp_sz = sizeof(tsc_resp) + AUTHTAG_LEN;
+ req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+ rc = snp_send_guest_request(&tsc_snp_dev, &req, &rio);
+ if (rc)
+ goto err_req;
+
+ memcpy(&tsc_resp, buf, sizeof(tsc_resp));
+ pr_debug("%s: response status %x scale %llx offset %llx factor %x\n",
+ __func__, tsc_resp.status, tsc_resp.tsc_scale, tsc_resp.tsc_offset,
+ tsc_resp.tsc_factor);
+
+ if (tsc_resp.status == 0) {
+ snp_tsc_scale = tsc_resp.tsc_scale;
+ snp_tsc_offset = tsc_resp.tsc_offset;
+ } else {
+ pr_err("Failed to get TSC info, response status %x\n", tsc_resp.status);
+ rc = -EIO;
+ }
+
+err_req:
+ /* The response buffer contains sensitive data, explicitly clear it. */
+ memzero_explicit(buf, sizeof(buf));
+ memzero_explicit(&tsc_resp, sizeof(tsc_resp));
+ memzero_explicit(&req, sizeof(req));
+
+ snp_guest_messaging_exit(&tsc_snp_dev);
+
+ return rc;
+}
+
+void __init snp_secure_tsc_prepare(void)
+{
+ if (!cc_platform_has(CC_ATTR_GUEST_SECURE_TSC))
+ return;
+
+ if (snp_get_tsc_info()) {
+ pr_alert("Unable to retrieve Secure TSC info from ASP\n");
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
+ }
+
+ pr_debug("SecureTSC enabled");
+}
@@ -94,6 +94,10 @@ void __init mem_encrypt_init(void)
/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
swiotlb_update_mem_attributes();
+ /* Initialize SNP Secure TSC */
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ snp_secure_tsc_prepare();
+
print_mem_encrypt_feature_info();
}
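
Once snp_tsc_scale and snp_tsc_offset have been programmed into each AP's VMSA, the hardware applies them to the guest's TSC reads. A rough, illustrative model of that transformation, assuming the scale uses AMD's usual 8.32 fixed-point TSC-ratio format applied before the offset is added (not kernel code, purely to show the arithmetic; requires GCC/Clang for the 128-bit intermediate):

	#include <stdint.h>

	/*
	 * Illustrative only: models how a Secure TSC guest's counter value
	 * could be derived from the raw host TSC, assuming an 8.32
	 * fixed-point scale applied before the offset.
	 */
	static inline uint64_t secure_tsc_model(uint64_t host_tsc,
						uint64_t tsc_scale,
						uint64_t tsc_offset)
	{
		/* Multiply by the fixed-point ratio, keep the integer part. */
		uint64_t scaled = (uint64_t)(((unsigned __int128)host_tsc *
					      tsc_scale) >> 32);

		return scaled + tsc_offset;
	}
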