@@ -124,6 +124,7 @@ struct thread_struct {
struct __riscv_v_ext_state vstate;
unsigned long align_ctl;
struct __riscv_v_ext_state kernel_vstate;
+ u8 pmlen;
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -9,8 +9,38 @@
#define _ASM_RISCV_UACCESS_H
#include <asm/asm-extable.h>
+#include <asm/cpufeature.h>
#include <asm/pgtable.h> /* for TASK_SIZE */
+#ifdef CONFIG_RISCV_ISA_POINTER_MASKING
+static inline unsigned long __untagged_addr(unsigned long addr)
+{
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SxNPM)) {
+ u8 shift = current->thread.pmlen;
+
+ /*
+ * Virtual addresses are sign-extended, while
+ * physical addresses are zero-extended.
+ */
+ if (IS_ENABLED(CONFIG_MMU))
+ return (long)(addr << shift) >> shift;
+ else
+ return (addr << shift) >> shift;
+ }
+
+ return addr;
+}
+
+#define untagged_addr(addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ (__force __typeof__(addr))__untagged_addr(__addr); \
+})
+
+#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
+#else
+#define untagged_addr(addr) (addr)
+#endif
+
/*
* User space memory access functions
*/
@@ -130,7 +160,7 @@ do { \
*/
#define __get_user(x, ptr) \
({ \
- const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
long __gu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
@@ -246,7 +276,7 @@ do { \
*/
#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
__typeof__(*__gu_ptr) __val = (x); \
long __pu_err = 0; \
\
@@ -293,13 +323,13 @@ unsigned long __must_check __asm_copy_from_user(void *to,
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __asm_copy_from_user(to, from, n);
+ return __asm_copy_from_user(to, untagged_addr(from), n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return __asm_copy_to_user(to, from, n);
+ return __asm_copy_to_user(untagged_addr(to), from, n);
}
extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -314,7 +344,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
might_fault();
return access_ok(to, n) ?
- __clear_user(to, n) : n;
+ __clear_user(untagged_addr(to), n) : n;
}
#define __get_kernel_nofault(dst, src, type, err_label) \
@@ -162,6 +162,7 @@ static void flush_tagged_addr_state(void)
return;
current->thread.envcfg &= ~ENVCFG_PMM;
+ current->thread.pmlen = 0;
sync_envcfg(current);
#endif
@@ -255,9 +256,14 @@ void __init arch_task_cache_init(void)
static bool have_user_pmlen_7;
static bool have_user_pmlen_16;
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
- unsigned long valid_mask = PR_PMLEN_MASK;
+ unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
struct thread_info *ti = task_thread_info(task);
u8 pmlen;
@@ -288,12 +294,25 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
return -EINVAL;
}
+ /*
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled, or if pointer masking
+ * is disabled for userspace.
+ */
+ if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
+ return -EINVAL;
+
task->thread.envcfg &= ~ENVCFG_PMM;
if (pmlen == 7)
task->thread.envcfg |= ENVCFG_PMM_PMLEN_7;
else if (pmlen == 16)
task->thread.envcfg |= ENVCFG_PMM_PMLEN_16;
+ if (arg & PR_TAGGED_ADDR_ENABLE)
+ task->thread.pmlen = pmlen;
+ else
+ task->thread.pmlen = 0;
+
if (task == current)
sync_envcfg(current);
@@ -308,6 +327,13 @@ long get_tagged_addr_ctrl(struct task_struct *task)
if (is_compat_thread(ti))
return -EINVAL;
+ if (task->thread.pmlen)
+ ret = PR_TAGGED_ADDR_ENABLE;
+
+ /*
+ * The task's pmlen is only set if the tagged address ABI is enabled,
+ * so the effective PMLEN must be extracted from envcfg.PMM.
+ */
switch (task->thread.envcfg & ENVCFG_PMM) {
case ENVCFG_PMM_PMLEN_7:
ret |= FIELD_PREP(PR_PMLEN_MASK, 7);
@@ -326,6 +352,24 @@ static bool try_to_set_pmm(unsigned long value)
return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+};
+
static int __init tagged_addr_init(void)
{
if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SxNPM))
@@ -339,6 +383,9 @@ static int __init tagged_addr_init(void)
have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+
return 0;
}
core_initcall(tagged_addr_init);
When pointer masking is enabled for userspace, the kernel can accept
tagged pointers as arguments to some system calls. Allow this by
untagging the pointers in access_ok() and the uaccess routines. The
software untagging in the uaccess routines is required because U-mode
and S-mode have entirely separate pointer masking configurations.

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---
 arch/riscv/include/asm/processor.h |  1 +
 arch/riscv/include/asm/uaccess.h   | 40 +++++++++++++++++++++---
 arch/riscv/kernel/process.c        | 49 +++++++++++++++++++++++++++++-
 3 files changed, 84 insertions(+), 6 deletions(-)
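
For reference, here is a minimal userspace sketch (not part of this patch)
showing how a task might opt in to the tagged address ABI and then pass a
tagged pointer to a system call. PR_SET_TAGGED_ADDR_CTRL and
PR_TAGGED_ADDR_ENABLE are the existing prctl() interface; the
PR_PMLEN_SHIFT/PR_PMLEN_MASK encoding is assumed to match the uapi
definitions added earlier in this series and may differ between revisions.
The tag value and buffer are purely illustrative.

/*
 * Hypothetical example: request PMLEN=7 pointer masking, enable the
 * tagged address ABI, and pass a pointer with a tag in bits 63:57 to
 * write(). With this patch, access_ok() and the uaccess routines untag
 * the pointer, so the syscall succeeds.
 */
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_PMLEN_SHIFT
#define PR_PMLEN_SHIFT		24	/* assumed uapi encoding from this series */
#define PR_PMLEN_MASK		(0x7fUL << PR_PMLEN_SHIFT)
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | (7UL << PR_PMLEN_SHIFT);
	char buf[] = "hello\n";

	/* Request PMLEN=7 and enable the tagged address ABI. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	/* Place an arbitrary tag in the masked bits of a user pointer. */
	char *tagged = (char *)((unsigned long)buf | (0x2aUL << 57));

	/* Kernel-side untagging makes this equivalent to passing buf. */
	return write(STDOUT_FILENO, tagged, strlen(buf)) < 0;
}

Hardware pointer masking covers the task's own loads and stores, while the
software untagging added here covers the kernel's accesses on the task's
behalf, since the S-mode pointer masking configuration is independent of
the U-mode one.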