@@ -6,11 +6,21 @@
struct vm_area_struct;
+/* Values for mktme_status */
+#define MKTME_DISABLED 0
+#define MKTME_ENUMERATED 1
+#define MKTME_ENABLED 2
+#define MKTME_UNINITIALIZED 3
+
+extern int mktme_status;
+
#ifdef CONFIG_X86_INTEL_MKTME
extern phys_addr_t mktme_keyid_mask;
extern int mktme_nr_keyids;
extern int mktme_keyid_shift;
+void mktme_disable(void);
+
#define prep_encrypted_page prep_encrypted_page
void prep_encrypted_page(struct page *page, int order, int keyid, bool zero);
@@ -28,6 +38,8 @@ extern struct page_ext_operations page_mktme_ops;
#define page_keyid page_keyid
int page_keyid(const struct page *page);
+void mktme_disable(void);
+
#else
#define mktme_keyid_mask ((phys_addr_t)0)
#define mktme_nr_keyids 0
@@ -508,11 +508,7 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED 0
-#define MKTME_DISABLED 1
-#define MKTME_UNINITIALIZED 2
-static int mktme_status = MKTME_UNINITIALIZED;
+int mktme_status __ro_after_init = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
@@ -568,11 +564,11 @@ static void detect_tme(struct cpuinfo_x86 *c)
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
- mktme_status = MKTME_ENABLED;
+ mktme_status = MKTME_ENUMERATED;
}
#ifdef CONFIG_X86_INTEL_MKTME
- if (mktme_status == MKTME_ENABLED && nr_keyids) {
+ if (mktme_status == MKTME_ENUMERATED && nr_keyids) {
mktme_nr_keyids = nr_keyids;
mktme_keyid_shift = c->x86_phys_bits - keyid_bits;
@@ -591,10 +587,7 @@ static void detect_tme(struct cpuinfo_x86 *c)
* Maybe needed if there's inconsistent configuation
* between CPUs.
*/
- physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
- mktme_keyid_mask = 0;
- mktme_keyid_shift = 0;
- mktme_nr_keyids = 0;
+ mktme_disable();
}
#endif
@@ -6,6 +6,15 @@ phys_addr_t mktme_keyid_mask;
int mktme_nr_keyids;
int mktme_keyid_shift;
+void mktme_disable(void)
+{
+ physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+ mktme_keyid_mask = 0;
+ mktme_keyid_shift = 0;
+ mktme_nr_keyids = 0;
+ mktme_status = MKTME_DISABLED;
+}
+
int page_keyid(const struct page *page)
{
if (mktme_status != MKTME_ENABLED)
Separate MKTME enumeration from enabling. We need to postpone enabling
until initialization is complete.

The new helper mktme_disable() allows disabling MKTME even if it has been
enumerated successfully. MKTME initialization may fail, and this
functionality allows the system to boot regardless of the failure.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/mktme.h | 12 ++++++++++++
 arch/x86/kernel/cpu/intel.c  | 15 ++++-----------
 arch/x86/mm/mktme.c          |  9 +++++++++
 3 files changed, 25 insertions(+), 11 deletions(-)
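
For illustration only, not part of the patch: a later initialization step
could consume the new MKTME_ENUMERATED state roughly as sketched below,
assuming CONFIG_X86_INTEL_MKTME=y. Only mktme_status, the MKTME_* values
and mktme_disable() come from this patch; mktme_late_init() and
mktme_setup_keyids() are made-up placeholders.

/*
 * Hypothetical sketch, not from this series: promote MKTME from
 * "enumerated" to "enabled" once setup succeeds, or fall back to
 * mktme_disable() so the system still boots if setup fails.
 */
#include <linux/init.h>
#include <asm/mktme.h>

/* Placeholder for whatever real setup work a later patch would do. */
static int __init mktme_setup_keyids(void)
{
	return 0;	/* pretend setup succeeded */
}

static int __init mktme_late_init(void)
{
	if (mktme_status != MKTME_ENUMERATED)
		return 0;	/* MKTME not usable on this system */

	if (mktme_setup_keyids() < 0) {
		mktme_disable();	/* boot continues without MKTME */
		return 0;
	}

	mktme_status = MKTME_ENABLED;
	return 0;
}
arch_initcall(mktme_late_init);

The point of the split is visible here: after this patch detect_tme() never
sets MKTME_ENABLED itself, so consumers such as page_keyid() keep seeing
mktme_status != MKTME_ENABLED until some later step explicitly enables
MKTME or calls mktme_disable().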