@@ -68,3 +68,15 @@ Misaligned accesses
Misaligned scalar accesses are supported in userspace, but they may perform
poorly. Misaligned vector accesses are only supported when the platform
provides the Zicclsm extension.
+
+Pointer masking
+---------------
+
+Support for pointer masking in userspace (the Supm extension) is provided via
+the ``PR_SET_TAGGED_ADDR_CTRL`` and ``PR_GET_TAGGED_ADDR_CTRL`` ``prctl()``
+operations. Pointer masking is disabled by default. To enable it, userspace
+must call ``PR_SET_TAGGED_ADDR_CTRL`` with the ``PR_PMLEN`` field set to the
+number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted
+as a lower bound; if the kernel is unable to satisfy the request, the
+``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits
+is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation.
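+
+As a minimal sketch of the expected usage (error handling is largely elided,
+and the fallback macro definitions, which mirror the values in
+``include/uapi/linux/prctl.h``, are only needed with libc headers that
+predate these constants)::
+
+  #include <stdio.h>
+  #include <sys/prctl.h>
+
+  #ifndef PR_PMLEN_SHIFT
+  #define PR_PMLEN_SHIFT 24
+  #define PR_PMLEN_MASK  (0x7fUL << PR_PMLEN_SHIFT)
+  #endif
+
+  int main(void)
+  {
+          /* Request at least 7 tag bits; the kernel may select a larger PMLEN. */
+          if (prctl(PR_SET_TAGGED_ADDR_CTRL, 7UL << PR_PMLEN_SHIFT, 0, 0, 0))
+                  return 1; /* Supm unavailable, or request not satisfiable */
+
+          /* Read back how many tag bits were actually granted. */
+          long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+          unsigned long pmlen = (ctrl & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT;
+
+          printf("pointer masking enabled, %lu tag bits\n", pmlen);
+          return 0;
+  }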
@@ -531,6 +531,17 @@ config RISCV_ISA_C
If you don't know what to do here, say Y.
+config RISCV_ISA_SUPM
+ bool "Supm extension for userspace pointer masking"
+ depends on 64BIT
+ default y
+ help
+ Add support for pointer masking in userspace (Supm) when the
+ underlying hardware extension (Smnpm or Ssnpm) is detected at boot.
+
+ If this option is disabled, userspace will be unable to use
+ the prctl(PR_{SET,GET}_TAGGED_ADDR_CTRL) API.
+
config RISCV_ISA_SVNAPOT
bool "Svnapot extension support for supervisor mode NAPOT pages"
depends on 64BIT && MMU
@@ -178,6 +178,14 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2)
extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);
+#ifdef CONFIG_RISCV_ISA_SUPM
+/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
+long get_tagged_addr_ctrl(struct task_struct *task);
+#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg)
+#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
@@ -70,6 +70,17 @@ static __always_inline bool has_fpu(void) { return false; }
#define __switch_to_fpu(__prev, __next) do { } while (0)
#endif
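+/*
+ * Read-modify-write helper for envcfg: replace the @mask bits of @task's
+ * saved envcfg with @val and, if @task is currently running, keep the
+ * live CSR in sync.
+ */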
+static inline void envcfg_update_bits(struct task_struct *task,
+ unsigned long mask, unsigned long val)
+{
+ unsigned long envcfg;
+
+ envcfg = (task->thread.envcfg & ~mask) | val;
+ task->thread.envcfg = envcfg;
+ if (task == current)
+ csr_write(CSR_ENVCFG, envcfg);
+}
+
static inline void __switch_to_envcfg(struct task_struct *next)
{
asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0",
@@ -7,6 +7,7 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -180,6 +181,10 @@ void flush_thread(void)
memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
+#ifdef CONFIG_RISCV_ISA_SUPM
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+ envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0);
+#endif
}
void arch_release_task_struct(struct task_struct *tsk)
@@ -242,3 +247,89 @@ void __init arch_task_cache_init(void)
{
riscv_v_setup_ctx_cache();
}
+
+#ifdef CONFIG_RISCV_ISA_SUPM
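+/* Selectable PMLEN values: the number of upper address bits that are masked */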
+enum {
+ PMLEN_0 = 0,
+ PMLEN_7 = 7,
+ PMLEN_16 = 16,
+};
+
+static bool have_user_pmlen_7;
+static bool have_user_pmlen_16;
+
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
+{
+ unsigned long valid_mask = PR_PMLEN_MASK;
+ struct thread_info *ti = task_thread_info(task);
+ unsigned long pmm;
+ u8 pmlen;
+
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+ if (arg & ~valid_mask)
+ return -EINVAL;
+
+ /*
+ * Prefer the smallest PMLEN that satisfies the user's request,
+ * in case choosing a larger PMLEN has a performance impact.
+ */
+ pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
+ if (pmlen == PMLEN_0)
+ pmm = ENVCFG_PMM_PMLEN_0;
+ else if (pmlen <= PMLEN_7 && have_user_pmlen_7)
+ pmm = ENVCFG_PMM_PMLEN_7;
+ else if (pmlen <= PMLEN_16 && have_user_pmlen_16)
+ pmm = ENVCFG_PMM_PMLEN_16;
+ else
+ return -EINVAL;
+
+ envcfg_update_bits(task, ENVCFG_PMM, pmm);
+
+ return 0;
+}
+
+long get_tagged_addr_ctrl(struct task_struct *task)
+{
+ struct thread_info *ti = task_thread_info(task);
+ long ret = 0;
+
+ if (is_compat_thread(ti))
+ return -EINVAL;
+
+ switch (task->thread.envcfg & ENVCFG_PMM) {
+ case ENVCFG_PMM_PMLEN_7:
+ ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
+ break;
+ case ENVCFG_PMM_PMLEN_16:
+ ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16);
+ break;
+ }
+
+ return ret;
+}
+
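+/*
+ * Probe one WARL encoding of envcfg.PMM: set @value, then read the field
+ * back (clearing it again) to see whether the hardware retained it.
+ */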
+static bool try_to_set_pmm(unsigned long value)
+{
+ csr_set(CSR_ENVCFG, value);
+ return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
+}
+
+static int __init tagged_addr_init(void)
+{
+ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+ return 0;
+
+ /*
+ * envcfg.PMM is a WARL field. Detect which values are supported.
+ * Assume the supported PMLEN values are the same on all harts.
+ */
+ csr_clear(CSR_ENVCFG, ENVCFG_PMM);
+ have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
+ have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);
+
+ return 0;
+}
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_RISCV_ISA_SUPM */
@@ -230,7 +230,7 @@ struct prctl_mm_map {
# define PR_PAC_APDBKEY (1UL << 3)
# define PR_PAC_APGAKEY (1UL << 4)
-/* Tagged user address controls for arm64 */
+/* Tagged user address controls for arm64 and RISC-V */
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
@@ -244,6 +244,9 @@ struct prctl_mm_map {
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* Unused; kept only for source compatibility */
# define PR_MTE_TCF_SHIFT 1
+/* RISC-V pointer masking tag length */
+# define PR_PMLEN_SHIFT 24
+# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT)
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57