@@ -344,6 +344,19 @@ config RISCV_ISA_C
If you don't know what to do here, say Y.
+config RISCV_ISA_SVPBMT
+ bool "SVPBMT extension support"
+ depends on 64BIT && MMU
+ select RISCV_ALTERNATIVE
+ default y
+ help
+ Adds support to dynamically detect the presence of the SVPBMT extension
+ (Supervisor-mode: page-based memory types) and enable its usage.
+
+ The SVPBMT extension is only available on 64-bit CPUs.
+
+ If you don't know what to do here, say Y.
+
config FPU
bool "FPU support"
default y
@@ -42,6 +42,9 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
+void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned int stage);
+
#else /* CONFIG_RISCV_ALTERNATIVE */
static inline void apply_boot_alternatives(void) { }
@@ -14,6 +14,9 @@
#define ERRATA_SIFIVE_NUMBER 2
#endif
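+
+/*
+ * CPU feature IDs: used as the "errata id" of ALTERNATIVE() entries whose
+ * vendor id is 0, and matched in riscv_cpufeature_patch_func().
+ */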
+#define CPUFEATURE_SVPBMT 0
+#define CPUFEATURE_NUMBER 1
+
#ifdef __ASSEMBLY__
#define ALT_INSN_FAULT(x) \
@@ -34,6 +37,18 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
: : "r" (addr) : "memory")
+/*
+ * ALT_SVPBMT() resolves the Svpbmt memory-type bits for "prot" at run time:
+ * the value is passed as an immediate pre-shifted right by ALT_SVPBMT_SHIFT
+ * (so it satisfies the "I" constraint) and shifted back into place by the
+ * alternative sequence.  _val is marked as "will be overwritten", so it
+ * needs to be set to 0 explicitly in the default (non-Svpbmt) case.
+ */
+#define ALT_SVPBMT_SHIFT 61
+#define ALT_SVPBMT(_val, prot) \
+asm(ALTERNATIVE("li %0, 0\t\nnop", "li %0, %1\t\nslli %0,%0,%2", 0, \
+ CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT) \
+ : "=r"(_val) \
+ : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
+ "I"(ALT_SVPBMT_SHIFT))
+
#endif /* __ASSEMBLY__ */
#endif
@@ -52,6 +52,7 @@ extern unsigned long elf_hwcap;
*/
enum riscv_isa_ext_id {
RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
+ RISCV_ISA_EXT_SVPBMT,
RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
};
@@ -24,4 +24,13 @@
*/
#define _PAGE_PFN_MASK GENMASK(31, 10)
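+
+/*
+ * Svpbmt is only available on 64-bit CPUs, so rv32 has no memory-type bits
+ * in the PTE and the memory-type values below are all zero.
+ */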
+#define _PAGE_NOCACHE 0
+#define _PAGE_IO 0
+#define _PAGE_MTMASK 0
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
+
#endif /* _ASM_RISCV_PGTABLE_32_H */
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/const.h>
+#include <asm/errata_list.h>
extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;
@@ -73,6 +74,52 @@ typedef struct {
*/
#define _PAGE_PFN_MASK GENMASK(53, 10)
+/*
+ * [62:61] Svpbmt Memory Type definitions:
+ *
+ * 00 - PMA Normal Cacheable, No change to implied PMA memory type
+ * 01 - NC Non-cacheable, idempotent, weakly-ordered Main Memory
+ * 10 - IO Non-cacheable, non-idempotent, strongly-ordered I/O memory
+ * 11 - Rsvd Reserved for future standard use
+ */
+#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
+#define _PAGE_IO_SVPBMT (1UL << 62)
+#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
+
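+/*
+ * The memory-type bits have to be resolved at run time via ALT_SVPBMT(),
+ * so they are wrapped in helpers instead of plain constants; without the
+ * Svpbmt extension the helpers return 0.
+ */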
+static inline u64 riscv_page_mtmask(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_MTMASK);
+ return val;
+}
+
+static inline u64 riscv_page_nocache(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_NOCACHE);
+ return val;
+}
+
+static inline u64 riscv_page_io(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_IO);
+ return val;
+}
+
+#define _PAGE_NOCACHE riscv_page_nocache()
+#define _PAGE_IO riscv_page_io()
+#define _PAGE_MTMASK riscv_page_mtmask()
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL | \
+ _PAGE_MTMASK))
+
static inline int pud_present(pud_t pud)
{
return (pud_val(pud) & _PAGE_PRESENT);
@@ -29,10 +29,6 @@
#define _PAGE_PFN_SHIFT 10
-/* Set of bits to preserve across pte_modify() */
-#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
- _PAGE_WRITE | _PAGE_EXEC | \
- _PAGE_USER | _PAGE_GLOBAL))
/*
* when all of R/W/X are zero, the PTE is a pointer to the next level
* of the page table; otherwise, it is a leaf PTE.
@@ -179,11 +179,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
#define PAGE_TABLE __pgprot(_PAGE_TABLE)
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions.
- */
-#define _PAGE_IOREMAP _PAGE_KERNEL
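+/*
+ * ioremap() mappings use the Svpbmt IO memory type when the extension is
+ * present; without it _PAGE_IO and _PAGE_MTMASK are 0, so this reduces to
+ * _PAGE_KERNEL as before.
+ */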
+#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
+#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
extern pgd_t swapper_pg_dir[];
@@ -523,6 +520,28 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
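+
+/*
+ * With Svpbmt, pgprot_noncached() selects the strongly-ordered IO memory
+ * type and pgprot_writecombine() the weakly-ordered, non-cacheable NC type.
+ * Without the extension both memory-type values are 0 and the protection
+ * bits are returned unchanged.
+ */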
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_IO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_NOCACHE;
+
+ return __pgprot(prot);
+}
+
/*
* THP functions
*/
@@ -63,6 +63,8 @@ static void __init_or_module _apply_alternatives(struct alt_entry *begin,
struct alt_entry *end,
unsigned int stage)
{
+ riscv_cpufeature_patch_func(begin, end, stage);
+
if (!vendor_patch_func)
return;
@@ -88,6 +88,7 @@ int riscv_of_parent_hartid(struct device_node *node)
*/
static struct riscv_isa_ext_data isa_ext_arr[] = {
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
+ __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
};
@@ -8,9 +8,15 @@
#include <linux/bitmap.h>
#include <linux/ctype.h>
+#include <linux/libfdt.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <asm/errata_list.h>
#include <asm/hwcap.h>
+#include <asm/patch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
@@ -192,6 +198,7 @@ void __init riscv_fill_hwcap(void)
set_bit(*ext - 'a', this_isa);
} else {
SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
+ SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
}
#undef SET_ISA_EXT_MAP
}
@@ -237,3 +244,69 @@ void __init riscv_fill_hwcap(void)
static_branch_enable(&cpu_hwcap_fpu);
#endif
}
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+struct cpufeature_info {
+ char name[ERRATA_STRING_LENGTH_MAX];
+ bool (*check_func)(unsigned int stage);
+};
+
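+/*
+ * Svpbmt is reported as available only when CONFIG_RISCV_ISA_SVPBMT is
+ * enabled and the "svpbmt" extension was found in the ISA string
+ * (see SET_ISA_EXT_MAP() in riscv_fill_hwcap()).
+ */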
+static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
+{
+#ifdef CONFIG_RISCV_ISA_SVPBMT
+ return riscv_isa_extension_available(NULL, SVPBMT);
+#endif
+
+ return false;
+}
+
+static const struct cpufeature_info __initconst_or_module
+cpufeature_list[CPUFEATURE_NUMBER] = {
+ {
+ .name = "svpbmt",
+ .check_func = cpufeature_svpbmt_check_func
+ },
+};
+
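+/*
+ * Probe all known CPU features and return a bitmap of the available ones,
+ * indexed by their CPUFEATURE_* ids.
+ */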
+static u32 __init_or_module cpufeature_probe(unsigned int stage)
+{
+ const struct cpufeature_info *info;
+ u32 cpu_req_feature = 0;
+ int idx;
+
+ for (idx = 0; idx < CPUFEATURE_NUMBER; idx++) {
+ info = &cpufeature_list[idx];
+
+ if (info->check_func(stage))
+ cpu_req_feature |= (1U << idx);
+ }
+
+ return cpu_req_feature;
+}
+
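+/*
+ * Patch the alternative entries that belong to standard CPU features:
+ * entries with a vendor id of 0 are patched when the corresponding
+ * CPUFEATURE_* bit is set in the probed feature bitmap.
+ */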
+void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
+ struct alt_entry *end,
+ unsigned int stage)
+{
+ u32 cpu_req_feature = cpufeature_probe(stage);
+ struct alt_entry *alt;
+ u32 tmp;
+
+ for (alt = begin; alt < end; alt++) {
+ if (alt->vendor_id != 0)
+ continue;
+ if (alt->errata_id >= CPUFEATURE_NUMBER) {
+ WARN(1, "cpufeature id %d is not in the kernel cpufeature list",
+ alt->errata_id);
+ continue;
+ }
+
+ tmp = (1U << alt->errata_id);
+ if (cpu_req_feature & tmp)
+ patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+ }
+}
+#endif