diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -16,6 +16,9 @@
*/
#define NET_IP_ALIGN 0
+#define MTE_CTRL_GCR_USER_EXCL_SHIFT 0
+#define MTE_CTRL_GCR_USER_EXCL_MASK 0xffff
+
#ifndef __ASSEMBLY__
#include <linux/build_bug.h>
@@ -151,7 +154,7 @@ struct thread_struct {
struct ptrauth_keys_kernel keys_kernel;
#endif
#ifdef CONFIG_ARM64_MTE
- u64 gcr_user_excl;
+ u64 mte_ctrl;
#endif
u64 sctlr_user;
};
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -49,7 +49,7 @@ int main(void)
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
- DEFINE(THREAD_GCR_EL1_USER, offsetof(struct task_struct, thread.gcr_user_excl));
+ DEFINE(THREAD_MTE_CTRL, offsetof(struct task_struct, thread.mte_ctrl));
#endif
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -182,7 +182,7 @@ alternative_else_nop_endif
* the RRND (bit[16]) setting.
*/
mrs_s \tmp2, SYS_GCR_EL1
- bfi \tmp2, \tmp, #0, #16
+ bfxil \tmp2, \tmp, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
msr_s SYS_GCR_EL1, \tmp2
#endif
.endm
@@ -205,7 +205,7 @@ alternative_else_nop_endif
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
- ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
+ ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
mte_set_gcr \tmp, \tmp2
1:
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -199,7 +199,7 @@ static void update_gcr_el1_excl(u64 excl)
static void set_gcr_el1_excl(u64 excl)
{
- current->thread.gcr_user_excl = excl;
+ current->thread.mte_ctrl = excl;
/*
* SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
@@ -263,8 +263,8 @@ void mte_suspend_exit(void)
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
- u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
- SYS_GCR_EL1_EXCL_MASK;
+ u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
if (!system_supports_mte())
return 0;
@@ -285,10 +285,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
if (task != current) {
task->thread.sctlr_user = sctlr;
- task->thread.gcr_user_excl = gcr_excl;
+ task->thread.mte_ctrl = mte_ctrl;
} else {
set_task_sctlr_el1(sctlr);
- set_gcr_el1_excl(gcr_excl);
+ set_gcr_el1_excl(mte_ctrl);
}
return 0;
@@ -297,7 +297,9 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
- u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
+ u64 mte_ctrl = task->thread.mte_ctrl;
+ u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
We are going to use this field to store more data. To prepare for that,
rename it and change the users to rely on the bit position of
gcr_user_excl in mte_ctrl.

Link: https://linux-review.googlesource.com/id/Ie1fd18e480100655f5d22137f5b22f4f3a9f9e2e
Signed-off-by: Peter Collingbourne <pcc@google.com>
---
 arch/arm64/include/asm/processor.h |  5 ++++-
 arch/arm64/kernel/asm-offsets.c    |  2 +-
 arch/arm64/kernel/entry.S          |  4 ++--
 arch/arm64/kernel/mte.c            | 14 ++++++++------
 4 files changed, 15 insertions(+), 10 deletions(-)
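As a note for reviewers (not part of the commit): below is a minimal standalone
sketch, in userspace C, of the packing convention this patch introduces: the GCR
exclude bits derived from the prctl() argument sit at
MTE_CTRL_GCR_USER_EXCL_SHIFT inside mte_ctrl, mirroring set_mte_ctrl() and
get_mte_ctrl() above. The PR_MTE_TAG_* and SYS_GCR_EL1_EXCL_MASK constants are
copied from the kernel headers; pack_mte_ctrl(), unpack_incl() and main() are
purely illustrative names, not kernel code.

/*
 * Standalone illustration of the mte_ctrl packing used in this patch.
 * Constant values mirror include/uapi/linux/prctl.h and asm/sysreg.h;
 * the helpers are hypothetical and exist only for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define PR_MTE_TAG_SHIFT		3
#define PR_MTE_TAG_MASK			(0xffffUL << PR_MTE_TAG_SHIFT)
#define SYS_GCR_EL1_EXCL_MASK		0xffffUL

#define MTE_CTRL_GCR_USER_EXCL_SHIFT	0
#define MTE_CTRL_GCR_USER_EXCL_MASK	0xffff

/* Pack: prctl "include" mask -> GCR exclude bits at their slot in mte_ctrl. */
static uint64_t pack_mte_ctrl(unsigned long arg)
{
	return (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
		SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
}

/* Unpack: recover the prctl-style include mask from mte_ctrl. */
static unsigned long unpack_incl(uint64_t mte_ctrl)
{
	return (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
	       SYS_GCR_EL1_EXCL_MASK;
}

int main(void)
{
	unsigned long arg = 0x3UL << PR_MTE_TAG_SHIFT;	/* include tags 0 and 1 */
	uint64_t mte_ctrl = pack_mte_ctrl(arg);

	printf("mte_ctrl = %#llx, incl = %#lx\n",
	       (unsigned long long)mte_ctrl, unpack_incl(mte_ctrl));
	return 0;
}

Since the shift is currently 0, the stored value is identical to the old
gcr_user_excl field, so this patch is functionally a rename; the shift only
starts to matter once additional state shares mte_ctrl.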