@@ -18,6 +18,8 @@ static inline bool mktme_enabled(void)
return static_branch_unlikely(&mktme_enabled_key);
}
+void mktme_disable(void);
+
extern struct page_ext_operations page_mktme_ops;
#define page_keyid page_keyid
@@ -68,6 +70,9 @@ static inline bool mktme_enabled(void)
{
return false;
}
+
+static inline void mktme_disable(void) {}
+
#endif
#endif
@@ -618,10 +618,7 @@ static void detect_tme(struct cpuinfo_x86 *c)
* We must not allow onlining secondary CPUs with non-matching
* configuration.
*/
- physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
- mktme_keyid_mask = 0;
- mktme_keyid_shift = 0;
- mktme_nr_keyids = 0;
+ mktme_disable();
}
#endif
@@ -15,6 +15,16 @@ int mktme_keyid_shift;
DEFINE_STATIC_KEY_FALSE(mktme_enabled_key);
EXPORT_SYMBOL_GPL(mktme_enabled_key);
+void mktme_disable(void)
+{
+ physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+ mktme_keyid_mask = 0;
+ mktme_keyid_shift = 0;
+ mktme_nr_keyids = 0;
+ if (mktme_enabled())
+ static_branch_disable(&mktme_enabled_key);
+}
+
static bool need_page_mktme(void)
{
/* Make sure keyid doesn't collide with extended page flags */
The new helper mktme_disable() makes it possible to disable MKTME even after it has been enumerated successfully. MKTME initialization may fail, and this functionality allows the system to boot regardless of the failure. MKTME needs a per-KeyID direct mapping. It requires substantially more virtual address space, which may be a problem in 4-level paging mode. If the system has more physical memory than we can handle with MKTME, this feature allows MKTME initialization to fail while still booting the system successfully. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> --- arch/x86/include/asm/mktme.h | 5 +++++ arch/x86/kernel/cpu/intel.c | 5 +---- arch/x86/mm/mktme.c | 10 ++++++++++ 3 files changed, 16 insertions(+), 4 deletions(-)