@@ -754,6 +754,7 @@
#define MSR_IA32_TSC_DEADLINE 0x000006E0
+#define MSR_IA32_PKRS 0x000006E1
#define MSR_TSX_FORCE_ABORT 0x0000010F
@@ -12,4 +12,24 @@
*/
#define PKR_AD_KEY(pkey) (PKR_AD_BIT << ((pkey) * PKR_BITS_PER_PKEY))
+/*
+ * Define a default PKRS value for each task.
+ *
+ * Key 0 has no restriction. All other keys are set to the most restrictive
+ * value, which is access disabled (AD=1).
+ *
+ * NOTE: This needs to be a macro to be used as part of the INIT_THREAD macro.
+ */
+#define INIT_PKRS_VALUE (PKR_AD_KEY(1) | PKR_AD_KEY(2) | PKR_AD_KEY(3) | \
+ PKR_AD_KEY(4) | PKR_AD_KEY(5) | PKR_AD_KEY(6) | \
+ PKR_AD_KEY(7) | PKR_AD_KEY(8) | PKR_AD_KEY(9) | \
+ PKR_AD_KEY(10) | PKR_AD_KEY(11) | PKR_AD_KEY(12) | \
+ PKR_AD_KEY(13) | PKR_AD_KEY(14) | PKR_AD_KEY(15))
+
+#ifdef CONFIG_ARCH_HAS_SUPERVISOR_PKEYS
+void write_pkrs(u32 new_pkrs);
+#else
+static inline void write_pkrs(u32 new_pkrs) { }
+#endif
+
#endif /* _ASM_X86_PKEYS_COMMON_H */
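For reference, INIT_PKRS_VALUE is a compile-time constant: assuming PKR_AD_BIT == 0x1 and PKR_BITS_PER_PKEY == 2 (the values used by PKR_AD_KEY() above), the AD bit for pkey N sits at bit 2*N, so OR'ing keys 1-15 together yields 0x55555554. A standalone userspace sketch to verify the arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Assumed values, mirroring the x86 pkey definitions */
#define PKR_AD_BIT		0x1u
#define PKR_BITS_PER_PKEY	2
#define PKR_AD_KEY(pkey)	(PKR_AD_BIT << ((pkey) * PKR_BITS_PER_PKEY))

int main(void)
{
	uint32_t pkrs = 0;
	int pkey;

	/* Key 0 stays unrestricted; keys 1-15 get AD=1 */
	for (pkey = 1; pkey <= 15; pkey++)
		pkrs |= PKR_AD_KEY(pkey);

	printf("INIT_PKRS_VALUE = 0x%08x\n", pkrs);	/* prints 0x55555554 */
	return 0;
}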
@@ -18,6 +18,7 @@ struct vm86;
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
+#include <asm/pkeys_common.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
@@ -526,6 +527,12 @@ struct thread_struct {
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
+
+#ifdef CONFIG_ARCH_HAS_SUPERVISOR_PKEYS
+ /* Saved Protection key register for supervisor mappings */
+ u32 saved_pkrs;
+#endif
+
#ifdef CONFIG_VM86
/* Virtual 86 mode info */
struct vm86 *vm86;
@@ -843,8 +850,15 @@ static inline void spin_lock_prefetch(const void *x)
#define STACK_TOP TASK_SIZE_LOW
#define STACK_TOP_MAX TASK_SIZE_MAX
+#ifdef CONFIG_ARCH_HAS_SUPERVISOR_PKEYS
+#define INIT_THREAD_PKRS .saved_pkrs = INIT_PKRS_VALUE
+#else
+#define INIT_THREAD_PKRS 0
+#endif
+
#define INIT_THREAD { \
.addr_limit = KERNEL_DS, \
+ INIT_THREAD_PKRS, \
}
extern unsigned long KSTK_ESP(struct task_struct *task);
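A note on the fallback above: when CONFIG_ARCH_HAS_SUPERVISOR_PKEYS is off, INIT_THREAD_PKRS must still expand to something, because it is followed by a comma inside INIT_THREAD; an empty definition would leave a stray `,` in the initializer and break the build. A bare 0 works because C99 permits positional initializers after designated ones; it simply initializes the member following addr_limit to zero, which static initialization does anyway. A minimal sketch with a made-up struct:

/* Hypothetical illustration, not kernel code */
struct demo {
	unsigned long addr_limit;
	unsigned int  next_member;
};

/* Compiles: the trailing 0 positionally initializes next_member */
struct demo d = { .addr_limit = ~0UL, 0, };

/*
 * Would not compile if the macro expanded to nothing:
 *	struct demo d = { .addr_limit = ~0UL, , };
 */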
@@ -58,6 +58,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
+#include <linux/pkeys.h>
#include "cpu.h"
@@ -1503,6 +1504,7 @@ static void setup_pks(void)
if (!cpu_feature_enabled(X86_FEATURE_PKS))
return;
+ write_pkrs(INIT_PKRS_VALUE);
cr4_set_bits(X86_CR4_PKS);
}
@@ -43,6 +43,7 @@
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
+#include <asm/pkeys_common.h>
#include "process.h"
@@ -187,6 +188,27 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
return ret;
}
+#ifdef CONFIG_ARCH_HAS_SUPERVISOR_PKEYS
+DECLARE_PER_CPU(u32, pkrs_cache);
+static inline void pks_init_task(struct task_struct *tsk)
+{
+ /* New tasks get the most restrictive PKRS value */
+ tsk->thread.saved_pkrs = INIT_PKRS_VALUE;
+}
+static inline void pks_sched_in(void)
+{
+	/*
+	 * PKRS is only temporarily changed during specific code paths.  Only
+	 * a preemption during one of these windows, away from the default
+	 * value, requires an MSR write; write_pkrs() optimizes away the rest.
+	 */
+ write_pkrs(current->thread.saved_pkrs);
+}
+#else
+static inline void pks_init_task(struct task_struct *tsk) { }
+static inline void pks_sched_in(void) { }
+#endif
+
void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -195,6 +217,8 @@ void flush_thread(void)
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
fpu__clear_all(&tsk->thread.fpu);
+
+ pks_init_task(tsk);
}
void disable_TSC(void)
@@ -644,6 +668,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
if ((tifp ^ tifn) & _TIF_SLD)
switch_to_sld(tifn);
+
+ pks_sched_in();
}
/*
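To make the context-switch interaction concrete, here is a hypothetical consumer of the API. The helper name pks_protected_write() and its pkey argument are made up for illustration; pkey allocation arrives later in the series. The point is the window between the two write_pkrs() calls: because saved_pkrs is updated first, a preemption anywhere inside the window is safe, and pks_sched_in() re-establishes the task's value when it is scheduled back in.

/* Hypothetical sketch, not part of this patch */
static void pks_protected_write(int pkey, void *dst, const void *src, size_t len)
{
	u32 default_pkrs = current->thread.saved_pkrs;

	/* Open an access window: clear AD/WD for this pkey */
	current->thread.saved_pkrs = update_pkey_val(default_pkrs, pkey, 0);
	write_pkrs(current->thread.saved_pkrs);

	memcpy(dst, src, len);		/* dst maps pages tagged with pkey */

	/* Close the window */
	current->thread.saved_pkrs = default_pkrs;
	write_pkrs(default_pkrs);
}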
@@ -231,3 +231,36 @@ u32 update_pkey_val(u32 pk_reg, int pkey, unsigned int flags)
return pk_reg;
}
+
+DEFINE_PER_CPU(u32, pkrs_cache);
+
+/**
+ * write_pkrs() - Optimized write of the PKRS MSR
+ * @new_pkrs: New value to write to the current CPU's PKRS
+ *
+ * Avoid redundant MSR writes by maintaining a per-cpu cache which can be
+ * checked quickly. The underlying WRMSR(MSR_IA32_PKRS) is not serializing
+ * but still maintains ordering properties similar to WRPKRU. The current SDM
+ * section on PKRS needs updating but should be the same as that of WRPKRU.
+ * To quote from the WRPKRU text:
+ *
+ *	WRPKRU will never execute transiently. Memory accesses
+ *	affected by PKRU register will not execute (even transiently)
+ *	until all prior executions of WRPKRU have completed execution
+ *	and updated the PKRU register.
+ */
+void write_pkrs(u32 new_pkrs)
+{
+ u32 *pkrs;
+
+	if (!cpu_feature_enabled(X86_FEATURE_PKS))
+ return;
+
+	/* get_cpu_ptr() disables preemption across the check and write */
+	pkrs = get_cpu_ptr(&pkrs_cache);
+ if (*pkrs != new_pkrs) {
+ *pkrs = new_pkrs;
+ wrmsrl(MSR_IA32_PKRS, new_pkrs);
+ }
+ put_cpu_ptr(pkrs);
+}
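One property of the cache worth spelling out: a call with an unchanged value costs only a per-cpu load and compare, so the common case in pks_sched_in(), where a task's saved_pkrs already matches what the CPU holds, never touches the MSR. For example:

/* Sketch: the second call hits pkrs_cache and skips the WRMSR */
write_pkrs(INIT_PKRS_VALUE);	/* WRMSR only if the cached value differs */
write_pkrs(INIT_PKRS_VALUE);	/* cache hit: no WRMSR */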