@@ -9,6 +9,7 @@
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
#define CSR_STVAL 0x143
+#define CSR_SATP 0x180

/* Exception cause high bit - is an interrupt if set */
#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
@@ -73,6 +73,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
}
#endif

+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
+
#include <asm-generic/io.h>

#endif /* _ASMRISCV_IO_H_ */
new file mode 100644
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASMRISCV_MMU_H_
+#define _ASMRISCV_MMU_H_
+#include <libcflat.h>
+#include <asm/csr.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+static inline pgd_t *current_pgtable(void)
+{
+	return (pgd_t *)((csr_read(CSR_SATP) & SATP_PPN) << PAGE_SHIFT);
+}
+
+void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
+			phys_addr_t phys_start, phys_addr_t phys_end,
+			pgprot_t prot, bool flush);
+void __mmu_enable(unsigned long satp);
+void mmu_enable(unsigned long mode, pgd_t *pgtable);
+void mmu_disable(void);
+
+void setup_mmu(void);
+
+static inline void local_flush_tlb_page(unsigned long addr)
+{
+ asm volatile("sfence.vma %0" : : "r" (addr) : "memory");
+}
+
+/*
+ * Get the pte pointer for a virtual address, even if it's not mapped.
+ * Constructs upper levels of the table as necessary.
+ */
+pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr);
+
+#endif /* _ASMRISCV_MMU_H_ */
@@ -2,6 +2,20 @@
#ifndef _ASMRISCV_PAGE_H_
#define _ASMRISCV_PAGE_H_

+#ifndef __ASSEMBLY__
+
+typedef unsigned long pgd_t;
+typedef unsigned long pte_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pteval_t;
+
+#define pte_val(x) ((pteval_t)(x))
+#define pgprot_val(x) ((pteval_t)(x))
+#define __pte(x) ((pte_t)(x))
+#define __pgprot(x) ((pgprot_t)(x))
+
+#endif /* !__ASSEMBLY__ */
+
#include <asm-generic/page.h>

#endif /* _ASMRISCV_PAGE_H_ */
new file mode 100644
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASMRISCV_PGTABLE_H_
+#define _ASMRISCV_PGTABLE_H_
+#include <linux/const.h>
+
+#if __riscv_xlen == 32
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE_SHIFT 31
+#define NR_LEVELS 2
+#define PGDIR_BITS 10
+#define PGDIR_MASK _AC(0x3FF, UL)
+#define PTE_PPN _AC(0xFFFFFC00, UL)
+
+#define SATP_MODE_DEFAULT SATP_MODE_32
+
+#else
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE_SHIFT 60
+#define NR_LEVELS 3
+#define PGDIR_BITS 9
+#define PGDIR_MASK _AC(0x1FF, UL)
+#define PTE_PPN _AC(0x3FFFFFFFFFFC00, UL)
+
+#define SATP_MODE_DEFAULT SATP_MODE_39
+
+#endif
+
+#define PPN_SHIFT 10
+
+#define _PAGE_PRESENT (1 << 0)
+#define _PAGE_READ (1 << 1)
+#define _PAGE_WRITE (1 << 2)
+#define _PAGE_EXEC (1 << 3)
+#define _PAGE_USER (1 << 4)
+#define _PAGE_GLOBAL (1 << 5)
+#define _PAGE_ACCESSED (1 << 6)
+#define _PAGE_DIRTY (1 << 7)
+#define _PAGE_SOFT (3 << 8) /* Reserved for software */
+
+#endif /* _ASMRISCV_PGTABLE_H_ */
new file mode 100644
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
+ */
+#include <libcflat.h>
+#include <alloc_page.h>
+#include <memregions.h>
+#include <asm/csr.h>
+#include <asm/io.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+
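+/* Root page table built while the MMU is off; also used by early ioremap() */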
+static pgd_t *__initial_pgtable;
+
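+/* Index of the VPN[level] field of @vaddr within a page table */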
+static int pte_index(uintptr_t vaddr, int level)
+{
+	return (vaddr >> (PGDIR_BITS * level + PAGE_SHIFT)) & PGDIR_MASK;
+}
+
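+/* PTE PPN field to next-level table pointer; valid since page tables are identity-mapped */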
+static pte_t *pteval_to_ptep(pteval_t pteval)
+{
+	return (pte_t *)(((pteval & PTE_PPN) >> PPN_SHIFT) << PAGE_SHIFT);
+}
+
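+/* Page pointer to the PPN field of a PTE */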
+static pteval_t ptep_to_pteval(pte_t *ptep)
+{
+	return ((pteval_t)ptep >> PAGE_SHIFT) << PPN_SHIFT;
+}
+
+pte_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
+{
+	pte_t *ptep = (pte_t *)pgtable;
+
+	assert(pgtable && !((uintptr_t)pgtable & ~PAGE_MASK));
+
+	for (int level = NR_LEVELS - 1; level > 0; --level) {
+		pte_t *next = &ptep[pte_index(vaddr, level)];
+		if (!pte_val(*next)) {
+			void *page = alloc_page();
+			*next = __pte(ptep_to_pteval(page) | _PAGE_PRESENT);
+		}
+		ptep = pteval_to_ptep(pte_val(*next));
+	}
+	ptep = &ptep[pte_index(vaddr, 0)];
+
+	return ptep;
+}
+
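+/* Install a leaf PTE mapping @vaddr to @paddr with @prot */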
+static pteval_t *__install_page(pgd_t *pgtable, phys_addr_t paddr,
+				uintptr_t vaddr, pgprot_t prot, bool flush)
+{
+	phys_addr_t ppn = (paddr >> PAGE_SHIFT) << PPN_SHIFT;
+	pteval_t pte = (pteval_t)ppn;
+	pte_t *ptep;
+
+	assert(!(ppn & ~PTE_PPN));
+
+	ptep = get_pte(pgtable, vaddr);
+	*ptep = __pte(pte | pgprot_val(prot) | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+
+	if (flush)
+		local_flush_tlb_page(vaddr);
+
+	return (pteval_t *)ptep;
+}
+
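+/* Map [phys_start, phys_end) at @virt_offset, page by page */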
+void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
+			phys_addr_t phys_start, phys_addr_t phys_end,
+			pgprot_t prot, bool flush)
+{
+	phys_addr_t paddr = phys_start & PAGE_MASK;
+	uintptr_t vaddr = virt_offset & PAGE_MASK;
+	uintptr_t virt_end = phys_end - paddr + vaddr;
+
+	assert(phys_start < phys_end);
+
+	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
+		__install_page(pgtable, paddr, vaddr, prot, flush);
+}
+
+void mmu_disable(void)
+{
+	__asm__ __volatile__ (
+	" csrw " xstr(CSR_SATP) ", zero\n"
+	" sfence.vma\n"
+	: : : "memory");
+}
+
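+/* sfence.vma orders earlier page table writes before use of the new satp */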
+void __mmu_enable(unsigned long satp)
+{
+	__asm__ __volatile__ (
+	" sfence.vma\n"
+	" csrw " xstr(CSR_SATP) ", %0\n"
+	: : "r" (satp) : "memory");
+}
+
+void mmu_enable(unsigned long mode, pgd_t *pgtable)
+{
+	unsigned long ppn = (unsigned long)pgtable >> PAGE_SHIFT;
+	unsigned long satp = mode | ppn;
+
+	assert(!(ppn & ~SATP_PPN));
+	__mmu_enable(satp);
+}
+
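+/* Identity-map all RAM (code regions RX, the rest RW), then enable translation */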
+void setup_mmu(void)
+{
+	struct mem_region *r;
+	pgd_t *pgtable;
+
+	if (!__initial_pgtable)
+		__initial_pgtable = alloc_page();
+	pgtable = __initial_pgtable;
+
+	for (r = mem_regions; r->end; ++r) {
+		if (r->flags & (MR_F_IO | MR_F_RESERVED))
+			continue;
+		if (r->flags & MR_F_CODE) {
+			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
+					   __pgprot(_PAGE_READ | _PAGE_EXEC), false);
+		} else {
+			mmu_set_range_ptes(pgtable, r->start, r->start, r->end,
+					   __pgprot(_PAGE_READ | _PAGE_WRITE), false);
+		}
+	}
+
+	mmu_enable(SATP_MODE_DEFAULT, pgtable);
+}
+
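+/* Identity-map an I/O region; use the initial page table if the MMU is not yet on */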
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
+{
+	phys_addr_t start = phys_addr & PAGE_MASK;
+	phys_addr_t end = PAGE_ALIGN(phys_addr + size);
+	pgd_t *pgtable = current_pgtable();
+	bool flush = true;
+
+	assert(sizeof(long) == 8 || !(phys_addr >> 32));
+
+	if (!pgtable) {
+		if (!__initial_pgtable)
+			__initial_pgtable = alloc_page();
+		pgtable = __initial_pgtable;
+		flush = false;
+	}
+
+	mmu_set_range_ptes(pgtable, start, start, end,
+			   __pgprot(_PAGE_READ | _PAGE_WRITE), flush);
+
+	return (void __iomem *)(unsigned long)phys_addr;
+}
@@ -14,6 +14,7 @@
#include <memregions.h>
#include <on-cpus.h>
#include <asm/csr.h>
+#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/setup.h>
@@ -171,5 +172,7 @@ void setup(const void *fdt, phys_addr_t freemem_start)
		setup_env(env, initrd_size);
	}

+	setup_mmu();
+
	banner();
}
@@ -29,6 +29,7 @@ cflatobjs += lib/memregions.o
cflatobjs += lib/on-cpus.o
cflatobjs += lib/riscv/bitops.o
cflatobjs += lib/riscv/io.o
+cflatobjs += lib/riscv/mmu.o
cflatobjs += lib/riscv/processor.o
cflatobjs += lib/riscv/sbi.o
cflatobjs += lib/riscv/setup.o