@@ -32,9 +32,13 @@ struct iwkey {
#ifdef CONFIG_X86_KEYLOCKER
void setup_keylocker(struct cpuinfo_x86 *c);
void destroy_keylocker_data(void);
+void restore_keylocker(void);
+extern bool valid_keylocker(void);
#else
static inline void setup_keylocker(struct cpuinfo_x86 *c) { }
static inline void destroy_keylocker_data(void) { }
+static inline void restore_keylocker(void) { }
+static inline bool valid_keylocker(void) { return false; }
#endif
#endif /*__ASSEMBLY__ */
@@ -11,20 +11,48 @@
#include <asm/fpu/api.h>
#include <asm/keylocker.h>
#include <asm/tlbflush.h>
+#include <asm/msr.h>
static __initdata struct keylocker_setup_data {
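+	/* Set once boot-up completes and the in-memory key is scrubbed. */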
+ bool initialized;
struct iwkey key;
} kl_setup;
+/*
+ * This flag is set when the wrapping key is loaded, and cleared when a
+ * later key restore fails. The state is exported to the crypto library
+ * via valid_keylocker(), so clearing it soft-disables Key Locker there.
+ */
+static bool valid_kl;
+
+bool valid_keylocker(void)
+{
+ return valid_kl;
+}
+#if IS_MODULE(CONFIG_CRYPTO_AES_KL)
+EXPORT_SYMBOL_GPL(valid_keylocker);
+#endif
+
static void __init generate_keylocker_data(void)
{
get_random_bytes(&kl_setup.key.integrity_key, sizeof(kl_setup.key.integrity_key));
get_random_bytes(&kl_setup.key.encryption_key, sizeof(kl_setup.key.encryption_key));
}
+/*
+ * This is invoked once boot-up has finished, which means the wrapping
+ * key has been loaded. If the feature is enabled, scrub the in-memory
+ * copy of the key and set the 'valid_kl' flag.
+ */
void __init destroy_keylocker_data(void)
{
+ if (!cpu_feature_enabled(X86_FEATURE_KEYLOCKER))
+ return;
+
memzero_explicit(&kl_setup.key, sizeof(kl_setup.key));
+ kl_setup.initialized = true;
+ valid_kl = true;
}
static void __init load_keylocker(void)
@@ -34,6 +62,27 @@ static void __init load_keylocker(void)
kernel_fpu_end();
}
+/**
+ * copy_keylocker - Copy the wrapping key from the backup.
+ *
+ * Request the hardware to copy the key from the platform backup in
+ * non-volatile storage into the CPU state.
+ *
+ * Returns: -EBUSY if the copy fails, 0 if successful.
+ */
+static int copy_keylocker(void)
+{
+ u64 status;
+
+ wrmsrl(MSR_IA32_COPY_IWKEY_TO_LOCAL, 1);
+
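+	/* Bit 0 of the copy status MSR indicates a successful copy. */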
+ rdmsrl(MSR_IA32_IWKEY_COPY_STATUS, status);
+ if (status & BIT(0))
+ return 0;
+ else
+ return -EBUSY;
+}
+
/**
* setup_keylocker - Enable the feature.
* @c: A pointer to struct cpuinfo_x86
@@ -52,6 +101,7 @@ void __ref setup_keylocker(struct cpuinfo_x86 *c)
if (c == &boot_cpu_data) {
u32 eax, ebx, ecx, edx;
+ bool backup_available;
cpuid_count(KEYLOCKER_CPUID, 0, &eax, &ebx, &ecx, &edx);
/*
@@ -65,13 +115,53 @@ void __ref setup_keylocker(struct cpuinfo_x86 *c)
goto disable;
}
+ backup_available = !!(ebx & KEYLOCKER_CPUID_EBX_BACKUP);
+	/*
+	 * The wrapping key in the CPU state is lost across S3/S4 sleep
+	 * states, so require the hardware backup capability whenever
+	 * those states are possible.
+	 */
+ if (!backup_available && IS_ENABLED(CONFIG_SUSPEND)) {
+ pr_debug("x86/keylocker: No key backup support with possible S3/4.\n");
+ goto disable;
+ }
+
generate_keylocker_data();
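+	/* Load the freshly generated wrapping key into the boot CPU. */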
+ load_keylocker();
+
+	/* Back up the wrapping key in non-volatile media. */
+ if (backup_available)
+ wrmsrl(MSR_IA32_BACKUP_IWKEY_TO_PLATFORM, 1);
+
+ pr_info("x86/keylocker: Enabled.\n");
+ return;
}
- load_keylocker();
+	/*
+	 * During boot, each CPU loads the key directly from memory.
+	 * Afterwards, a waking CPU recovers the key by copying the
+	 * platform-scoped backup into its own CPU state.
+	 */
+ if (!kl_setup.initialized) {
+ load_keylocker();
+ return;
+ } else if (valid_kl) {
+ int rc;
- pr_info_once("x86/keylocker: Enabled.\n");
- return;
+ rc = copy_keylocker();
+ if (!rc)
+ return;
+
+		/*
+		 * The key copy failed, so this CPU would use an
+		 * inconsistent key from now on. Invalidate the feature
+		 * via the flag, just as on a backup failure.
+		 */
+ valid_kl = false;
+ pr_err_once("x86/keylocker: Invalid copy status (rc: %d).\n", rc);
+ }
disable:
setup_clear_cpu_cap(X86_FEATURE_KEYLOCKER);
@@ -80,3 +170,43 @@ void __ref setup_keylocker(struct cpuinfo_x86 *c)
/* Make sure the feature disabled for kexec-reboot. */
cr4_clear_bits(X86_CR4_KEYLOCKER);
}
+
+/**
+ * restore_keylocker - Restore the wrapping key.
+ *
+ * The boot CPU executes this on resume, while the other CPUs restore
+ * the key through setup_keylocker().
+ */
+void restore_keylocker(void)
+{
+ u64 backup_status;
+ int rc;
+
+ if (!cpu_feature_enabled(X86_FEATURE_KEYLOCKER) || !valid_kl)
+ return;
+
+	/*
+	 * The IA32_IWKEYBACKUP_STATUS MSR contains a bitmap that
+	 * indicates a valid backup if bit 0 is set and a read (or
+	 * write) error if bit 2 is set.
+	 */
+ rdmsrl(MSR_IA32_IWKEY_BACKUP_STATUS, backup_status);
+ if (backup_status & BIT(0)) {
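+		/* A valid backup exists; copy it into this CPU's state. */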
+ rc = copy_keylocker();
+ if (rc)
+ pr_err("x86/keylocker: Invalid copy state (rc: %d).\n", rc);
+ else
+ return;
+ } else {
+ pr_err("x86/keylocker: The key backup access failed with %s.\n",
+ (backup_status & BIT(2)) ? "read error" : "invalid status");
+ }
+
+	/*
+	 * The backup key is unavailable. Invalidate the feature via
+	 * the flag to prevent any further use, but keep the feature
+	 * itself with a zeroed wrapping key rather than disabling it.
+	 */
+ pr_err("x86/keylocker: Failed to restore wrapping key.\n");
+ valid_kl = false;
+}
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
+#include <asm/keylocker.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -264,6 +265,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
x86_platform.restore_sched_clock_state();
cache_bp_restore();
perf_restore_debug_store();
+ restore_keylocker();
c = &cpu_data(smp_processor_id());
if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))