@@ -1370,6 +1370,65 @@ static int config_global_keyid(u64 global_keyid)
return seamcall_on_each_package_serialized(&sc);
}
+/* Initialize one TDMR */
+static int init_tdmr(struct tdmr_info *tdmr)
+{
+ u64 next;
+
+ /*
+ * Initializing PAMT entries might be time-consuming (in
+ * proportion to the size of the requested TDMR). To avoid long
+ * latency in one SEAMCALL, TDH.SYS.TDMR.INIT only initializes
+ * an (implementation-defined) subset of PAMT entries in one
+ * invocation.
+ *
+ * Call TDH.SYS.TDMR.INIT iteratively until all PAMT entries
+ * of the requested TDMR are initialized, i.e. until the
+ * 'next-to-initialize' address reaches the end of the TDMR.
+ */
+ do {
+ struct tdx_module_output out;
+ int ret;
+
+ ret = seamcall(TDH_SYS_TDMR_INIT, tdmr->base, 0, 0, 0,
+ NULL, &out);
+ if (ret)
+ return ret;
+ /*
+ * RDX contains 'next-to-initialize' address if
+ * TDH.SYS.TDMR.INIT succeeded.
+ */
+ next = out.rdx;
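+ /* Reschedule if needed; initializing a large TDMR takes many iterations */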
+ if (need_resched())
+ cond_resched();
+ } while (next < tdmr->base + tdmr->size);
+
+ return 0;
+}
+
+/* Initialize all TDMRs */
+static int init_tdmrs(struct tdmr_info **tdmr_array, int tdmr_num)
+{
+ int i;
+
+ /*
+ * Initialize TDMRs one-by-one for simplicity, though the TDX
+ * architecture does allow different TDMRs to be initialized in
+ * parallel on multiple CPUs. Parallel initialization could
+ * be added later when the time spent in the serialized scheme
+ * becomes a real concern.
+ */
+ for (i = 0; i < tdmr_num; i++) {
+ int ret;
+
+ ret = init_tdmr(tdmr_array[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int init_tdx_module(void)
{
struct tdmr_info **tdmr_array;
@@ -1451,11 +1510,12 @@ static int init_tdx_module(void)
if (ret)
goto out_free_pamts;
- /*
- * Return -EFAULT until all steps of TDX module
- * initialization are done.
- */
- ret = -EFAULT;
+ /* Initialize TDMRs to complete the TDX module initialization */
+ ret = init_tdmrs(tdmr_array, tdmr_num);
+ if (ret)
+ goto out_free_pamts;
+
+ tdx_module_status = TDX_MODULE_INITIALIZED;
out_free_pamts:
/*
* Free PAMTs allocated in construct_tdmrs() when TDX module
@@ -1478,6 +1538,11 @@ static int init_tdx_module(void)
free_tdmrs(tdmr_array, tdmr_num);
kfree(tdmr_array);
out:
+ if (ret)
+ pr_info("Failed to initialize TDX module.\n");
+ else
+ pr_info("TDX module initialized.\n");
+
return ret;
}
@@ -126,6 +126,7 @@ struct tdmr_info {
#define TDH_SYS_INFO 32
#define TDH_SYS_INIT 33
#define TDH_SYS_LP_INIT 35
+#define TDH_SYS_TDMR_INIT 36
#define TDH_SYS_LP_SHUTDOWN 44
#define TDH_SYS_CONFIG 45
Initialize TDMRs via TDH.SYS.TDMR.INIT as the last step to complete the
TDX module initialization.  All TDMRs must be initialized with the
TDH.SYS.TDMR.INIT SEAMCALL before the TDX memory can be used to run any
TD guest.  The SEAMCALL internally uses the global KeyID to initialize
the PAMTs in order to cryptographically protect them from the host
kernel.  TDH.SYS.TDMR.INIT can be done on any cpu.

The time needed to initialize a TDMR is proportional to its size.  To
avoid long latency in a single SEAMCALL, TDH.SYS.TDMR.INIT only
initializes an (implementation-specific) subset of PAMT entries of one
TDMR in one invocation.  The caller is responsible for calling
TDH.SYS.TDMR.INIT iteratively until all PAMT entries of the requested
TDMR are initialized.

The current implementation initializes TDMRs one by one.  It takes
~100ms on a 2-socket machine with 2.2GHz CPUs and 64GB memory when the
system is idle.  Each TDH.SYS.TDMR.INIT takes ~7us on average.  TDX
does allow different TDMRs to be initialized concurrently on multiple
CPUs.  This parallel scheme could be introduced later when the total
initialization time becomes a real concern, e.g. on a platform with a
much larger memory size.

Signed-off-by: Kai Huang <kai.huang@intel.com>
---
 arch/x86/virt/vmx/tdx.c | 75 ++++++++++++++++++++++++++++++++++++++---
 arch/x86/virt/vmx/tdx.h |  1 +
 2 files changed, 71 insertions(+), 5 deletions(-)
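For reference, below is a minimal sketch (not part of this patch) of what
the parallel scheme mentioned above could look like, built on the
init_tdmr() helper added above and the standard workqueue API.  The names
struct tdmr_init_work, tdmr_init_workfn() and init_tdmrs_parallel() are
hypothetical.

/*
 * Illustrative sketch only, not part of this patch: one possible way to
 * initialize TDMRs in parallel should the serialized loop ever become a
 * bottleneck.
 */
struct tdmr_init_work {
        struct work_struct work;
        struct tdmr_info *tdmr;
        int ret;
};

static void tdmr_init_workfn(struct work_struct *work)
{
        struct tdmr_init_work *tw = container_of(work, struct tdmr_init_work,
                                                 work);

        tw->ret = init_tdmr(tw->tdmr);
}

static int init_tdmrs_parallel(struct tdmr_info **tdmr_array, int tdmr_num)
{
        struct tdmr_init_work *works;
        int i, ret = 0;

        works = kcalloc(tdmr_num, sizeof(*works), GFP_KERNEL);
        if (!works)
                return -ENOMEM;

        /* Queue one work item per TDMR; the unbound workqueue spreads them across CPUs */
        for (i = 0; i < tdmr_num; i++) {
                works[i].tdmr = tdmr_array[i];
                INIT_WORK(&works[i].work, tdmr_init_workfn);
                queue_work(system_unbound_wq, &works[i].work);
        }

        /* Wait for all work items and report the first error, if any */
        for (i = 0; i < tdmr_num; i++) {
                flush_work(&works[i].work);
                if (works[i].ret && !ret)
                        ret = works[i].ret;
        }

        kfree(works);
        return ret;
}

A workqueue is used here rather than on_each_cpu() because initializing
one TDMR can take milliseconds, which is far too long to spend in IPI
context with interrupts disabled.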